From 43f750ce6caab1edb80a4d367ea9f19ac59e12dc Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 18 Mar 2024 19:37:59 +0330 Subject: [PATCH 01/38] [ADD] Ignoring vagrant dir in git Signed-off-by: Pooya Azarpour --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index f18f07550..05d0e2dde 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,6 @@ e2e/debug # Charts .cr-release-packages/ .cr-index/ + +# Vagrant +.vagrant/ \ No newline at end of file From 2ea688a54b6bcc8ea15b5e35dbc98ad5786873ae Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 18 Mar 2024 19:39:05 +0330 Subject: [PATCH 02/38] [ADD] Add GO_EXEC variable for using multiply version of go binary Signed-off-by: Pooya Azarpour --- Makefile | 14 +++++++------- Makefile.restic-integration.vars.mk | 2 +- Makefile.vars.mk | 1 + e2e/kind.mk | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index 0d3e4b409..5ac35a5fb 100644 --- a/Makefile +++ b/Makefile @@ -27,11 +27,11 @@ include Makefile.restic-integration.mk envtest/integration.mk # E2E tests -include e2e/Makefile -go_build ?= go build -o $(BIN_FILENAME) $(K8UP_MAIN_GO) +go_build ?= $(GO_EXEC) build -o $(BIN_FILENAME) $(K8UP_MAIN_GO) .PHONY: test test: ## Run tests - go test ./... -coverprofile cover.out + $(GO_EXEC) test ./... -coverprofile cover.out .PHONY: build build: generate fmt vet $(BIN_FILENAME) docs-update-usage ## Build manager binary @@ -41,7 +41,7 @@ run: export BACKUP_ENABLE_LEADER_ELECTION = $(ENABLE_LEADER_ELECTION) run: export K8UP_DEBUG = true run: export BACKUP_OPERATOR_NAMESPACE = default run: fmt vet ## Run against the configured Kubernetes cluster in ~/.kube/config. Use ARGS to pass arguments to the command, e.g. 
`make run ARGS="--help"` - go run $(K8UP_MAIN_GO) $(ARGS) $(CMD) $(CMD_ARGS) + $(GO_EXEC) run $(K8UP_MAIN_GO) $(ARGS) $(CMD) $(CMD_ARGS) .PHONY: run-operator run-operator: CMD := operator @@ -80,9 +80,9 @@ deploy: kind-load-image install ## Deploy controller in the configured Kubernete .PHONY: generate generate: ## Generate manifests e.g. CRD, RBAC etc. # Generate code - go run sigs.k8s.io/controller-tools/cmd/controller-gen object:headerFile=".github/boilerplate.go.txt" paths="./..." + $(GO_EXEC) run sigs.k8s.io/controller-tools/cmd/controller-gen object:headerFile=".github/boilerplate.go.txt" paths="./..." # Generate CRDs - go run sigs.k8s.io/controller-tools/cmd/controller-gen rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=$(CRD_ROOT_DIR)/v1 crd:crdVersions=v1 + $(GO_EXEC) run sigs.k8s.io/controller-tools/cmd/controller-gen rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=$(CRD_ROOT_DIR)/v1 crd:crdVersions=v1 .PHONY: crd crd: generate ## Generate CRD to file @@ -90,11 +90,11 @@ crd: generate ## Generate CRD to file .PHONY: fmt fmt: ## Run go fmt against code - go fmt ./... + $(GO_EXEC) fmt ./... .PHONY: vet vet: ## Run go vet against code - go vet ./... + $(GO_EXEC) vet ./... 
.PHONY: lint lint: fmt vet golangci-lint ## Invokes all linting targets diff --git a/Makefile.restic-integration.vars.mk b/Makefile.restic-integration.vars.mk index d3d0e2ff4..0555f857d 100644 --- a/Makefile.restic-integration.vars.mk +++ b/Makefile.restic-integration.vars.mk @@ -13,7 +13,7 @@ restore_dir ?= $(integrationtest_dir)/restore stats_url ?= http://localhost:8091 -restic_version ?= $(shell go mod edit -json | jq -r '.Require[] | select(.Path == "github.com/restic/restic").Version' | sed "s/v//") +restic_version ?= $(shell $(GO_EXEC) mod edit -json | jq -r '.Require[] | select(.Path == "github.com/restic/restic").Version' | sed "s/v//") restic_path ?= $(go_bin)/restic restic_pid ?= $(integrationtest_dir)/restic.pid restic_url ?= https://github.com/restic/restic/releases/download/v$(restic_version)/restic_$(restic_version)_$(os)_$(arch).bz2 diff --git a/Makefile.vars.mk b/Makefile.vars.mk index 781dc9e7b..9c0af0a7a 100644 --- a/Makefile.vars.mk +++ b/Makefile.vars.mk @@ -1,5 +1,6 @@ IMG_TAG ?= latest +GO_EXEC ?= go K8UP_MAIN_GO ?= cmd/k8up/main.go K8UP_GOOS ?= linux K8UP_GOARCH ?= amd64 diff --git a/e2e/kind.mk b/e2e/kind.mk index 901fef148..ea752fdff 100644 --- a/e2e/kind.mk +++ b/e2e/kind.mk @@ -34,4 +34,4 @@ $(KIND_KUBECONFIG): $(KIND) $(KIND): export GOBIN = $(go_bin) $(KIND): | $(go_bin) - go install sigs.k8s.io/kind@latest + $(GO_EXEC) install sigs.k8s.io/kind@latest From 8eb0703674617ebaf617178b412c6520dfdae6a6 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 18 Mar 2024 19:41:40 +0330 Subject: [PATCH 03/38] [ADD] Add Volume for using secret or configmap in k8s, Add VolumeMounts for mount volume, Add BackendOpts for using custom options in k8up or restic Signed-off-by: Pooya Azarpour --- api/v1/backend.go | 10 + api/v1/restore_types.go | 11 +- api/v1/runnable_types.go | 23 + api/v1/zz_generated.deepcopy.go | 103 ++ .../v1/k8up.io_archives.yaml | 305 ++++ .../v1/k8up.io_backups.yaml | 254 +++ .../v1/k8up.io_checks.yaml | 254 +++ 
.../v1/k8up.io_prunes.yaml | 254 +++ .../v1/k8up.io_restores.yaml | 305 ++++ .../v1/k8up.io_schedules.yaml | 1475 +++++++++++++++++ 10 files changed, 2992 insertions(+), 2 deletions(-) diff --git a/api/v1/backend.go b/api/v1/backend.go index 7afdf6034..93a4333ba 100644 --- a/api/v1/backend.go +++ b/api/v1/backend.go @@ -25,6 +25,8 @@ type ( Swift *SwiftSpec `json:"swift,omitempty"` B2 *B2Spec `json:"b2,omitempty"` Rest *RestServerSpec `json:"rest,omitempty"` + + Options *BackendOpts `json:"options,omitempty"` } // +k8s:deepcopy-gen=false @@ -116,6 +118,7 @@ type S3Spec struct { Bucket string `json:"bucket,omitempty"` AccessKeyIDSecretRef *corev1.SecretKeySelector `json:"accessKeyIDSecretRef,omitempty"` SecretAccessKeySecretRef *corev1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty"` + VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } // EnvVars returns the env vars for this backend. @@ -265,6 +268,7 @@ type RestServerSpec struct { URL string `json:"url,omitempty"` UserSecretRef *corev1.SecretKeySelector `json:"userSecretRef,omitempty"` PasswordSecretReg *corev1.SecretKeySelector `json:"passwordSecretReg,omitempty"` + VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } // EnvVars returns the env vars for this backend. @@ -279,3 +283,9 @@ func (in *RestServerSpec) String() string { protocol, url, _ := strings.Cut(in.URL, "://") return fmt.Sprintf("rest:%s://%s:%s@%s", protocol, "$(USER)", "$(PASSWORD)", url) } + +type BackendOpts struct { + CACert string `json:"caCert,omitempty"` + ClientCert string `json:"clientCert,omitempty"` + ClientKey string `json:"clientKey,omitempty"` +} diff --git a/api/v1/restore_types.go b/api/v1/restore_types.go index 780b69c66..081c2f2b2 100644 --- a/api/v1/restore_types.go +++ b/api/v1/restore_types.go @@ -35,8 +35,9 @@ type RestoreSpec struct { // RestoreMethod contains how and where the restore should happen // all the settings are mutual exclusive. 
type RestoreMethod struct { - S3 *S3Spec `json:"s3,omitempty"` - Folder *FolderRestore `json:"folder,omitempty"` + S3 *S3Spec `json:"s3,omitempty"` + Folder *FolderRestore `json:"folder,omitempty"` + Options *RestoreOpts `json:"options,omitempty"` } type FolderRestore struct { @@ -145,3 +146,9 @@ func init() { var ( RestoreKind = reflect.TypeOf(Restore{}).Name() ) + +type RestoreOpts struct { + CACert string `json:"caCert,omitempty"` + ClientCert string `json:"clientCert,omitempty"` + ClientKey string `json:"clientKey,omitempty"` +} diff --git a/api/v1/runnable_types.go b/api/v1/runnable_types.go index a71249217..4ac1a8001 100644 --- a/api/v1/runnable_types.go +++ b/api/v1/runnable_types.go @@ -15,11 +15,34 @@ type RunnableSpec struct { // PodSecurityContext describes the security context with which this action shall be executed. PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"` + // Volumes List of volumes that can be mounted by containers belonging to the pod. + Volumes *[]RunnableVolumeSpec `json:"volumes,omitempty"` + // ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. // Value must be positive integer if given. ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` } +type RunnableVolumeSpec struct { + // name of the volume. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name"` + + // persistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // secret represents a secret that should populate this volume. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` + // configMap represents a configMap that should populate this volume + // +optional + ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"` +} + // AppendEnvFromToContainer will add EnvFromSource from the given RunnableSpec to the Container func (in *RunnableSpec) AppendEnvFromToContainer(containerSpec *corev1.Container) { if in.Backend != nil { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index f536307f7..071e11a83 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -210,6 +210,11 @@ func (in *Backend) DeepCopyInto(out *Backend) { *out = new(RestServerSpec) (*in).DeepCopyInto(*out) } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(BackendOpts) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend. @@ -222,6 +227,21 @@ func (in *Backend) DeepCopy() *Backend { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackendOpts) DeepCopyInto(out *BackendOpts) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendOpts. +func (in *BackendOpts) DeepCopy() *BackendOpts { + if in == nil { + return nil + } + out := new(BackendOpts) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Backup) DeepCopyInto(out *Backup) { *out = *in @@ -784,6 +804,17 @@ func (in *RestServerSpec) DeepCopyInto(out *RestServerSpec) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = new([]corev1.VolumeMount) + if **in != nil { + in, out := *in, *out + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestServerSpec. @@ -868,6 +899,11 @@ func (in *RestoreMethod) DeepCopyInto(out *RestoreMethod) { *out = new(FolderRestore) (*in).DeepCopyInto(*out) } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = new(RestoreOpts) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreMethod. @@ -880,6 +916,21 @@ func (in *RestoreMethod) DeepCopy() *RestoreMethod { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreOpts) DeepCopyInto(out *RestoreOpts) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreOpts. +func (in *RestoreOpts) DeepCopy() *RestoreOpts { + if in == nil { + return nil + } + out := new(RestoreOpts) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RestoreSchedule) DeepCopyInto(out *RestoreSchedule) { *out = *in @@ -986,6 +1037,17 @@ func (in *RunnableSpec) DeepCopyInto(out *RunnableSpec) { *out = new(corev1.PodSecurityContext) (*in).DeepCopyInto(*out) } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = new([]RunnableVolumeSpec) + if **in != nil { + in, out := *in, *out + *out = make([]RunnableVolumeSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } if in.ActiveDeadlineSeconds != nil { in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds *out = new(int64) @@ -1003,6 +1065,36 @@ func (in *RunnableSpec) DeepCopy() *RunnableSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunnableVolumeSpec) DeepCopyInto(out *RunnableVolumeSpec) { + *out = *in + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunnableVolumeSpec. +func (in *RunnableVolumeSpec) DeepCopy() *RunnableVolumeSpec { + if in == nil { + return nil + } + out := new(RunnableVolumeSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *S3Spec) DeepCopyInto(out *S3Spec) { *out = *in @@ -1016,6 +1108,17 @@ func (in *S3Spec) DeepCopyInto(out *S3Spec) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = new([]corev1.VolumeMount) + if **in != nil { + in, out := *in, *out + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml index 007a6cb3f..a03ba65da 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml @@ -252,6 +252,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -320,6 +329,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -369,6 +420,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -643,6 +736,15 @@ spec: required: - claimName type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object s3: properties: accessKeyIDSecretRef: @@ -691,6 +793,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object type: object snapshot: @@ -706,6 +850,167 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. 
+ items: + properties: + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object status: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml index 1fcddb7c4..1ea31e5aa 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml @@ -259,6 +259,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -327,6 +336,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -376,6 +427,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -649,6 +742,167 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. 
+ items: + properties: + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object status: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml index afc226fa2..46d761782 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml @@ -254,6 +254,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -322,6 +331,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -371,6 +422,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -632,6 +725,167 @@ spec: SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. type: integer + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. 
+ items: + properties: + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object status: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml index 318a94b15..1aaf76bc8 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml @@ -254,6 +254,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -322,6 +331,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -371,6 +422,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -662,6 +755,167 @@ spec: SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. type: integer + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. 
+ items: + properties: + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object status: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml index 7431ea3b6..f6e8d7684 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml @@ -254,6 +254,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -322,6 +331,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -371,6 +422,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -645,6 +738,15 @@ spec: required: - claimName type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object s3: properties: accessKeyIDSecretRef: @@ -693,6 +795,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object type: object snapshot: @@ -708,6 +852,167 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. + items: + properties: + configMap: + description: configMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the ConfigMap or its + keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or + its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object status: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml index 6e915dd40..4e3a98315 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml @@ -245,6 +245,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -313,6 +322,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
+ Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -362,6 +413,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -638,6 +731,15 @@ spec: required: - claimName type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object s3: properties: accessKeyIDSecretRef: @@ -686,6 +788,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object type: object schedule: @@ -705,6 +849,169 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. + items: + properties: + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object backend: description: |- @@ -902,6 +1209,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -970,6 +1286,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -1019,6 +1377,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -1234,6 +1634,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -1302,6 +1711,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -1351,6 +1802,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -1630,6 +2123,169 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. + items: + properties: + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
+ If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object check: description: CheckSchedule manages the schedules for the checks @@ -1837,6 +2493,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -1905,6 +2570,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -1954,6 +2661,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). 
+ SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -2221,6 +2970,169 @@ spec: SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. type: integer + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. + items: + properties: + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- @@ -2615,6 +3527,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -2683,6 +3604,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. 
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -2732,6 +3695,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -3029,6 +4034,169 @@ spec: SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. type: integer + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. 
+ items: + properties: + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. 
+ May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object resourceRequirementsTemplate: description: ResourceRequirementsTemplate describes the compute resource @@ -3292,6 +4460,15 @@ spec: mountPath: type: string type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -3360,6 +4537,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object s3: properties: @@ -3409,6 +4628,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object swift: properties: @@ -3685,6 +4946,15 @@ spec: required: - claimName type: object + options: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object s3: properties: accessKeyIDSecretRef: @@ -3733,6 +5003,48 @@ spec: - key type: object x-kubernetes-map-type: atomic + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. 
+ properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object type: object schedule: @@ -3752,6 +5064,169 @@ spec: items: type: string type: array + volumes: + description: Volumes List of volumes that can be mounted by containers + belonging to the pod. + items: + properties: + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: |- + defaultMode is optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Defaults to 0644. + Directories within the path are not affected by this setting. 
+ This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
+ type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + properties: + claimName: + description: |- + claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. 
If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + required: + - name + type: object + type: array type: object successfulJobsHistoryLimit: description: |- From e13ba45445cb9283c96e68119fbb71ef0f6c5805 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 18 Mar 2024 19:43:39 +0330 Subject: [PATCH 04/38] [ADD] Add vardir command option for mount emptyDir in pod Signed-off-by: Pooya Azarpour --- cmd/operator/main.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 90df903ae..70ecf5b9d 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -82,6 +82,8 @@ var ( &cli.BoolFlag{Destination: &cfg.Config.EnableLeaderElection, Name: "enable-leader-election", EnvVars: []string{"BACKUP_ENABLE_LEADER_ELECTION"}, Value: true, DefaultText: "enabled", Usage: "enable leader election within the operator Pod"}, &cli.StringFlag{Destination: &cfg.Config.BackupCheckSchedule, Name: "checkschedule", EnvVars: []string{"BACKUP_CHECKSCHEDULE"}, Value: "0 0 * * 0", Usage: "the default check schedule"}, &cli.StringFlag{Destination: &cfg.Config.OperatorNamespace, Name: "operator-namespace", EnvVars: []string{"BACKUP_OPERATOR_NAMESPACE"}, Required: true, Usage: "set the namespace in which the K8up operator itself runs"}, + + &cli.StringFlag{Destination: &cfg.Config.PodVarDir, Name: "vardir", EnvVars: []string{"VAR_DIR"}, Value: "/k8up", Usage: "the var data dir for read/write k8up data or temp file in pod"}, }, } ) From e2622c63d6f0085388c165f183fc20cabde320f1 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 18 Mar 2024 19:45:22 +0330 Subject: [PATCH 05/38] [ADD] Supporting self certificate authority and mTls when using S3 object storage Signed-off-by: Pooya Azarpour --- cmd/restic/main.go | 86 ++++++++++--- envtest/envsuite.go | 1 + operator/archivecontroller/executor.go | 133 +++++++++++++++++--- operator/backupcontroller/backup_utils.go | 94 +++++++++++++- 
operator/backupcontroller/executor.go | 13 +- operator/cfg/config.go | 3 +- operator/checkcontroller/executor.go | 95 +++++++++++++- operator/prunecontroller/executor.go | 99 ++++++++++++++- operator/restorecontroller/executor.go | 113 ++++++++++++++++- operator/restorecontroller/executor_test.go | 2 +- operator/utils/utils.go | 23 ++++ restic/cfg/config.go | 8 ++ restic/cli/archive.go | 4 +- restic/cli/init.go | 9 +- restic/cli/restic.go | 31 +++++ restic/cli/restore.go | 40 ++++-- restic/cli/utils.go | 38 ++++++ restic/s3/client.go | 48 ++++++- 18 files changed, 765 insertions(+), 75 deletions(-) create mode 100644 operator/utils/utils.go create mode 100644 restic/cli/utils.go diff --git a/cmd/restic/main.go b/cmd/restic/main.go index d2725a2ea..fcdfd2533 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -20,8 +20,12 @@ import ( ) const ( - backupDirEnvKey = "BACKUP_DIR" - restoreDirEnvKey = "RESTORE_DIR" + backupDirEnvKey = "BACKUP_DIR" + restoreDirEnvKey = "RESTORE_DIR" + caCertFileEnvKey = "CA_CERT_FILE" + clientCertFileEnvKey = "CLIENT_CERT_FILE" + clientKeyFileEnvKey = "CLIENT_KEY_FILE" + workDirEnvKey = "WORK_DIR" restoreTypeArg = "restoreType" restoreS3EndpointArg = "restoreS3Endpoint" @@ -63,6 +67,9 @@ var ( &cli.StringFlag{Destination: &cfg.Config.RestoreS3AccessKey, Name: restoreS3AccessKeyIDArg, EnvVars: []string{"RESTORE_ACCESSKEYID"}, Usage: "S3 access key used to connect to the S3 endpoint when restoring"}, &cli.StringFlag{Destination: &cfg.Config.RestoreS3SecretKey, Name: restoreS3SecretAccessKeyArg, EnvVars: []string{"RESTORE_SECRETACCESSKEY"}, Usage: "S3 secret key used to connect to the S3 endpoint when restoring"}, &cli.StringFlag{Destination: &cfg.Config.RestoreS3Endpoint, Name: restoreS3EndpointArg, EnvVars: []string{"RESTORE_S3ENDPOINT"}, Usage: "S3 endpoint to connect to when restoring, e.g. 
'https://minio.svc:9000/backup"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreCACert, Name: "restoreCaCert", Usage: "The certificate authority file path using for restore (If isn't filled, using caCert)"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreClientCert, Name: "restoreClientCert", Usage: "The client certificate file path using for restore (If isn't filled, using clientCert)"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreClientKey, Name: "restoreClientKey", Usage: "The client private key file path using for restore (If isn't filled, using clientKey)"}, &cli.BoolFlag{Destination: &cfg.Config.VerifyRestore, Name: "verifyRestore", Usage: "If the restore should get verified, only for PVCs restore"}, &cli.BoolFlag{Destination: &cfg.Config.RestoreTrimPath, Name: "trimRestorePath", EnvVars: []string{"TRIM_RESTOREPATH"}, Value: true, DefaultText: "enabled", Usage: "If set, strips the value of --restoreDir from the lefts side of the remote restore path value"}, @@ -87,6 +94,11 @@ var ( &cli.StringSliceFlag{Name: "targetPods", EnvVars: []string{"TARGET_PODS"}, Usage: "Filter list of pods by TARGET_PODS names"}, &cli.DurationFlag{Destination: &cfg.Config.SleepDuration, Name: "sleepDuration", EnvVars: []string{"SLEEP_DURATION"}, Usage: "Sleep for specified amount until init starts"}, + + &cli.PathFlag{Destination: &cfg.Config.VarDir, Name: "varDir", Value: "/k8up", Usage: "The var directory is stored k8up metadata files and temporary files"}, + &cli.PathFlag{Destination: &cfg.Config.CACert, Name: "caCert", EnvVars: []string{caCertFileEnvKey}, Usage: "The certificate authority file path"}, + &cli.PathFlag{Destination: &cfg.Config.ClientCert, Name: "clientCert", EnvVars: []string{clientCertFileEnvKey}, Usage: "The client certificate file path"}, + &cli.PathFlag{Destination: &cfg.Config.ClientKey, Name: "clientKey", EnvVars: []string{clientKeyFileEnvKey}, Usage: "The client private key file path"}, }, } ) @@ -197,30 +209,52 @@ func doCheck(resticCLI 
*resticCli.Restic) error { } func doRestore(resticCLI *resticCli.Restic) error { - if cfg.Config.DoRestore { - if err := resticCLI.Restore(cfg.Config.RestoreSnap, resticCli.RestoreOptions{ - RestoreType: resticCli.RestoreType(cfg.Config.RestoreType), - RestoreDir: cfg.Config.RestoreDir, - RestoreFilter: cfg.Config.RestoreFilter, - Verify: cfg.Config.VerifyRestore, - S3Destination: resticCli.S3Bucket{ - Endpoint: cfg.Config.RestoreS3Endpoint, - AccessKey: cfg.Config.RestoreS3AccessKey, - SecretKey: cfg.Config.RestoreS3SecretKey, - }, - }, cfg.Config.Tags); err != nil { - return fmt.Errorf("restore job failed: %w", err) - } + if !cfg.Config.DoRestore { + return nil + } + + restoreOptions := resticCli.RestoreOptions{ + RestoreType: resticCli.RestoreType(cfg.Config.RestoreType), + RestoreDir: cfg.Config.RestoreDir, + RestoreFilter: cfg.Config.RestoreFilter, + Verify: cfg.Config.VerifyRestore, + S3Destination: resticCli.S3Bucket{ + Endpoint: cfg.Config.RestoreS3Endpoint, + AccessKey: cfg.Config.RestoreS3AccessKey, + SecretKey: cfg.Config.RestoreS3SecretKey, + Cert: fillRestoreS3Cert(), + }, } + + if err := resticCLI.Restore(cfg.Config.RestoreSnap, restoreOptions, cfg.Config.Tags); err != nil { + return fmt.Errorf("restore job failed: %w", err) + } + return nil } func doArchive(resticCLI *resticCli.Restic) error { - if cfg.Config.DoArchive { - if err := resticCLI.Archive(cfg.Config.ResticBin, cfg.Config.VerifyRestore, cfg.Config.Tags); err != nil { - return fmt.Errorf("archive job failed: %w", err) - } + if !cfg.Config.DoArchive { + return nil } + + restoreOptions := resticCli.RestoreOptions{ + RestoreType: resticCli.RestoreType(cfg.Config.RestoreType), + RestoreDir: cfg.Config.RestoreDir, + RestoreFilter: cfg.Config.RestoreFilter, + Verify: cfg.Config.VerifyRestore, + S3Destination: resticCli.S3Bucket{ + Endpoint: cfg.Config.RestoreS3Endpoint, + AccessKey: cfg.Config.RestoreS3AccessKey, + SecretKey: cfg.Config.RestoreS3SecretKey, + Cert: fillRestoreS3Cert(), + }, + } + 
+ if err := resticCLI.Archive(restoreOptions, cfg.Config.Tags); err != nil { + return fmt.Errorf("archive job failed: %w", err) + } + return nil } @@ -289,3 +323,15 @@ func cancelOnTermination(cancel context.CancelFunc, mainLogger logr.Logger) { cancel() }() } + +func fillRestoreS3Cert() (cert resticCli.S3Cert) { + if cfg.Config.RestoreCACert != "" { + cert.CACert = cfg.Config.RestoreCACert + } + if cfg.Config.RestoreClientCert != "" && cfg.Config.RestoreClientKey != "" { + cert.ClientCert = cfg.Config.RestoreClientCert + cert.ClientKey = cfg.Config.RestoreClientKey + } + + return cert +} diff --git a/envtest/envsuite.go b/envtest/envsuite.go index 5d9ec55b1..6d149b462 100644 --- a/envtest/envsuite.go +++ b/envtest/envsuite.go @@ -235,5 +235,6 @@ func defaultConfiguration() *cfg.Configuration { MetricsBindAddress: ":8080", PodFilter: "backupPod=true", EnableLeaderElection: true, + PodVarDir: "/k8up", } } diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index 34b20e5e2..cec8de105 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -2,8 +2,8 @@ package archivecontroller import ( "context" - "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" @@ -14,17 +14,22 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) -const archivePath = "/archive" +const ( + archivePath = "/archive" + _dataDirName = "k8up-dir" +) // ArchiveExecutor will execute the batch.job for archive. type ArchiveExecutor struct { executor.Generic + archive *k8upv1.Archive } // NewArchiveExecutor will return a new executor for archive jobs. 
func NewArchiveExecutor(config job.Config) *ArchiveExecutor { return &ArchiveExecutor{ Generic: executor.Generic{Config: config}, + archive: config.Obj.(*k8upv1.Archive), } } @@ -36,22 +41,26 @@ func (a *ArchiveExecutor) GetConcurrencyLimit() int { // Execute creates the actual batch.job on the k8s api. func (a *ArchiveExecutor) Execute(ctx context.Context) error { log := controllerruntime.LoggerFrom(ctx) - archive := a.Obj.(*k8upv1.Archive) batchJob := &batchv1.Job{} batchJob.Name = a.jobName() - batchJob.Namespace = archive.Namespace + batchJob.Namespace = a.archive.Namespace _, err := controllerutil.CreateOrUpdate(ctx, a.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, archive, a.Config) + mutateErr := job.MutateBatchJob(batchJob, a.archive, a.Config) if mutateErr != nil { return mutateErr } - batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, archive) - archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].Args = a.setupArgs(archive) - return nil + batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) + a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() + + args, argsErr := a.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args + + return argsErr }) if err != nil { log.Error(err, "could not create job") @@ -67,16 +76,17 @@ func (a *ArchiveExecutor) jobName() string { return k8upv1.ArchiveType.String() + "-" + a.Obj.GetName() } -func (a *ArchiveExecutor) setupArgs(archive *k8upv1.Archive) []string { - args := []string{"-archive", "-restoreType", "s3"} +func (a *ArchiveExecutor) setupArgs() ([]string, error) { + args := a.appendOptionsArgs() - if archive.Spec.RestoreSpec != nil { - if len(archive.Spec.RestoreSpec.Tags) > 0 { - args 
= append(args, executor.BuildTagArgs(archive.Spec.RestoreSpec.Tags)...) + args = append(args, []string{"-archive", "-restoreType", "s3"}...) + if a.archive.Spec.RestoreSpec != nil { + if len(a.archive.Spec.RestoreSpec.Tags) > 0 { + args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) } } - return args + return args, nil } func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Archive) []corev1.EnvVar { @@ -118,3 +128,96 @@ func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Arch func (a *ArchiveExecutor) cleanupOldArchives(ctx context.Context, archive *k8upv1.Archive) { a.CleanupOldResources(ctx, &k8upv1.ArchiveList{}, archive.Namespace, archive) } + +func (a *ArchiveExecutor) appendOptionsArgs() []string { + var args []string + + args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) + + if a.archive.Spec.Backend.Options != nil { + if a.archive.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"--caCert", a.archive.Spec.Backend.Options.CACert}...) + } + if a.archive.Spec.Backend.Options.ClientCert != "" && a.archive.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "--clientCert", + a.archive.Spec.Backend.Options.ClientCert, + "--clientKey", + a.archive.Spec.Backend.Options.ClientKey, + }..., + ) + } + } + + if a.archive.Spec.RestoreMethod.Options != nil { + if a.archive.Spec.RestoreMethod.Options.CACert != "" { + args = append(args, []string{"--restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}...) 
+ } + if a.archive.Spec.RestoreMethod.Options.ClientCert != "" && a.archive.Spec.RestoreMethod.Options.ClientKey != "" { + args = append( + args, + []string{ + "--restoreClientCert", + a.archive.Spec.RestoreMethod.Options.ClientCert, + "--restoreClientKey", + a.archive.Spec.RestoreMethod.Options.ClientKey, + }..., + ) + } + } + + return args +} + +func (a *ArchiveExecutor) attachMoreVolumes() []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if utils.ZeroLen(a.archive.Spec.Volumes) { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*a.archive.Spec.Volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *a.archive.Spec.Volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + moreVolumes = append(moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }) + } + + return moreVolumes +} + +func (a *ArchiveExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { + var volumeMount []corev1.VolumeMount + + if a.archive.Spec.Backend.S3 != nil && !utils.ZeroLen(a.archive.Spec.Backend.S3.VolumeMounts) { + volumeMount = *a.archive.Spec.Backend.S3.VolumeMounts + } + if a.archive.Spec.Backend.Rest != nil && !utils.ZeroLen(a.archive.Spec.Backend.Rest.VolumeMounts) { + volumeMount = *a.archive.Spec.Backend.Rest.VolumeMounts + } + + ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + volumeMount = append(volumeMount, ku8pVolumeMount) + + return volumeMount +} diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index 
08f7f69d5..1387a9ad3 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -3,17 +3,19 @@ package backupcontroller import ( "context" "fmt" - "path" - "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "path" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/k8up-io/k8up/v2/operator/cfg" ) +const _dataDirName = "k8up-dir" + func (b *BackupExecutor) fetchPVCs(ctx context.Context, list client.ObjectList) error { return b.Config.Client.List(ctx, list, client.InNamespace(b.backup.Namespace)) } @@ -74,6 +76,16 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err return err } +func (b *BackupExecutor) setupArgs() ([]string, error) { + args := b.appendOptionsArgs() + + if len(b.backup.Spec.Tags) > 0 { + args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) 
+ } + + return args, nil +} + func (b *BackupExecutor) setupEnvVars() ([]corev1.EnvVar, error) { vars := executor.NewEnvVarConverter() @@ -97,3 +109,81 @@ func (b *BackupExecutor) setupEnvVars() ([]corev1.EnvVar, error) { } return vars.Convert(), nil } + +func (b *BackupExecutor) attachMoreVolumes() []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if utils.ZeroLen(b.backup.Spec.Volumes) { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*b.backup.Spec.Volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *b.backup.Spec.Volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + moreVolumes = append(moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }) + } + + return moreVolumes +} + +func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { + var volumeMount []corev1.VolumeMount + + if b.backup.Spec.Backend.S3 != nil && !utils.ZeroLen(b.backup.Spec.Backend.S3.VolumeMounts) { + volumeMount = *b.backup.Spec.Backend.S3.VolumeMounts + } + if b.backup.Spec.Backend.Rest != nil && !utils.ZeroLen(b.backup.Spec.Backend.Rest.VolumeMounts) { + volumeMount = *b.backup.Spec.Backend.Rest.VolumeMounts + } + + ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + volumeMount = append(volumeMount, ku8pVolumeMount) + + return volumeMount +} + +func (b *BackupExecutor) appendOptionsArgs() []string { + var args []string + + args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) 
+ + if b.backup.Spec.Backend.Options == nil { + return args + } + + if b.backup.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"--caCert", b.backup.Spec.Backend.Options.CACert}...) + } + if b.backup.Spec.Backend.Options.ClientCert != "" && b.backup.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "--clientCert", + b.backup.Spec.Backend.Options.ClientCert, + "--clientKey", + b.backup.Spec.Backend.Options.ClientKey, + }..., + ) + } + + return args +} diff --git a/operator/backupcontroller/executor.go b/operator/backupcontroller/executor.go index a921e596f..c808bb4bd 100644 --- a/operator/backupcontroller/executor.go +++ b/operator/backupcontroller/executor.go @@ -260,12 +260,17 @@ func (b *BackupExecutor) startBackup(ctx context.Context) error { } b.backup.Spec.AppendEnvFromToContainer(&batchJob.job.Spec.Template.Spec.Containers[0]) batchJob.job.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount - batchJob.job.Spec.Template.Spec.Containers[0].Args = executor.BuildTagArgs(b.backup.Spec.Tags) - batchJob.job.Spec.Template.Spec.Volumes = batchJob.volumes - batchJob.job.Spec.Template.Spec.Containers[0].VolumeMounts = b.newVolumeMounts(batchJob.job.Spec.Template.Spec.Volumes) + batchJob.job.Spec.Template.Spec.Volumes = append(batchJob.volumes, b.attachMoreVolumes()...) 
+ batchJob.job.Spec.Template.Spec.Containers[0].VolumeMounts = append( + b.newVolumeMounts(batchJob.volumes), + b.attachMoreVolumeMounts()..., + ) + + args, argsErr := b.setupArgs() + batchJob.job.Spec.Template.Spec.Containers[0].Args = args index++ - return nil + return argsErr }) if err != nil { return fmt.Errorf("unable to createOrUpdate(%q): %w", batchJob.job.Name, err) diff --git a/operator/cfg/config.go b/operator/cfg/config.go index ac732ee06..e6c3cf5da 100644 --- a/operator/cfg/config.go +++ b/operator/cfg/config.go @@ -2,7 +2,6 @@ package cfg import ( "fmt" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -81,6 +80,8 @@ type Configuration struct { EnableLeaderElection bool OperatorNamespace string + PodVarDir string + // Allows to pass options to restic, see https://restic.readthedocs.io/en/stable/manual_rest.html?highlight=--option#usage-help // Format: `key=value,key2=value2` ResticOptions string diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index 58b9e95cb..efc57fc5e 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -2,6 +2,7 @@ package checkcontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/utils" "github.com/k8up-io/k8up/v2/operator/executor" batchv1 "k8s.io/api/batch/v1" @@ -13,6 +14,8 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) +const _dataDirName = "k8up-dir" + // CheckExecutor will execute the batch.job for checks. 
type CheckExecutor struct { executor.Generic @@ -51,9 +54,14 @@ func (c *CheckExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].Args = []string{"-check"} + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() batchJob.Labels[job.K8upExclusive] = "true" - return nil + + args, argsErr := c.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args + + return argsErr }) if err != nil { c.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) @@ -67,6 +75,13 @@ func (c *CheckExecutor) jobName() string { return k8upv1.CheckType.String() + "-" + c.check.Name } +func (r *CheckExecutor) setupArgs() ([]string, error) { + args := r.appendOptionsArgs() + args = append(args, "-check") + + return args, nil +} + func (c *CheckExecutor) setupEnvVars(ctx context.Context) []corev1.EnvVar { log := controllerruntime.LoggerFrom(ctx) vars := executor.NewEnvVarConverter() @@ -93,3 +108,79 @@ func (c *CheckExecutor) setupEnvVars(ctx context.Context) []corev1.EnvVar { func (c *CheckExecutor) cleanupOldChecks(ctx context.Context, check *k8upv1.Check) { c.CleanupOldResources(ctx, &k8upv1.CheckList{}, check.Namespace, check) } + +func (c *CheckExecutor) appendOptionsArgs() []string { + var args []string + + args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) + + if c.check.Spec.Backend.Options != nil { + if c.check.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"--caCert", c.check.Spec.Backend.Options.CACert}...) 
+ } + if c.check.Spec.Backend.Options.ClientCert != "" && c.check.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "--clientCert", + c.check.Spec.Backend.Options.ClientCert, + "--clientKey", + c.check.Spec.Backend.Options.ClientKey, + }..., + ) + } + } + + return args +} + +func (c *CheckExecutor) attachMoreVolumes() []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if utils.ZeroLen(c.check.Spec.Volumes) { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*c.check.Spec.Volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *c.check.Spec.Volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + moreVolumes = append(moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }) + } + + return moreVolumes +} + +func (c *CheckExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { + var volumeMount []corev1.VolumeMount + + if c.check.Spec.Backend.S3 != nil && !utils.ZeroLen(c.check.Spec.Backend.S3.VolumeMounts) { + volumeMount = *c.check.Spec.Backend.S3.VolumeMounts + } + if c.check.Spec.Backend.Rest != nil && !utils.ZeroLen(c.check.Spec.Backend.Rest.VolumeMounts) { + volumeMount = *c.check.Spec.Backend.Rest.VolumeMounts + } + + ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + volumeMount = append(volumeMount, ku8pVolumeMount) + + return volumeMount +} diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index ceef68ff9..91d632901 100644 --- a/operator/prunecontroller/executor.go +++ 
b/operator/prunecontroller/executor.go @@ -2,6 +2,7 @@ package prunecontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/utils" "strconv" "strings" @@ -16,6 +17,8 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) +const _dataDirName = "k8up-dir" + // PruneExecutor will execute the batch.job for Prunes. type PruneExecutor struct { executor.Generic @@ -45,9 +48,14 @@ func (p *PruneExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].Args = append([]string{"-prune"}, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() batchJob.Labels[job.K8upExclusive] = "true" - return nil + + args, argsErr := p.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args + + return argsErr }) if err != nil { p.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) @@ -62,6 +70,17 @@ func (p *PruneExecutor) jobName() string { return k8upv1.PruneType.String() + "-" + p.prune.Name } +func (p *PruneExecutor) setupArgs() ([]string, error) { + args := p.appendOptionsArgs() + + args = append(args, "-prune") + if len(p.prune.Spec.Retention.Tags) > 0 { + args = append(args, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) + } + + return args, nil +} + // Exclusive should return true for jobs that can't run while other jobs run. 
func (p *PruneExecutor) Exclusive() bool { return true @@ -123,3 +142,79 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ return vars.Convert() } + +func (p *PruneExecutor) appendOptionsArgs() []string { + var args []string + + args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) + + if p.prune.Spec.Backend.Options != nil { + if p.prune.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"--caCert", p.prune.Spec.Backend.Options.CACert}...) + } + if p.prune.Spec.Backend.Options.ClientCert != "" && p.prune.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "--clientCert", + p.prune.Spec.Backend.Options.ClientCert, + "--clientKey", + p.prune.Spec.Backend.Options.ClientKey, + }..., + ) + } + } + + return args +} + +func (p *PruneExecutor) attachMoreVolumes() []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if utils.ZeroLen(p.prune.Spec.Volumes) { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*p.prune.Spec.Volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *p.prune.Spec.Volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + moreVolumes = append(moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }) + } + + return moreVolumes +} + +func (p *PruneExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { + var volumeMount []corev1.VolumeMount + + if p.prune.Spec.Backend.S3 != nil && !utils.ZeroLen(p.prune.Spec.Backend.S3.VolumeMounts) { + volumeMount = *p.prune.Spec.Backend.S3.VolumeMounts + } + if 
p.prune.Spec.Backend.Rest != nil && !utils.ZeroLen(p.prune.Spec.Backend.Rest.VolumeMounts) { + volumeMount = *p.prune.Spec.Backend.Rest.VolumeMounts + } + + ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + volumeMount = append(volumeMount, ku8pVolumeMount) + + return volumeMount +} diff --git a/operator/restorecontroller/executor.go b/operator/restorecontroller/executor.go index ecfb0677f..f480adf9c 100644 --- a/operator/restorecontroller/executor.go +++ b/operator/restorecontroller/executor.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/k8up-io/k8up/v2/operator/utils" "github.com/k8up-io/k8up/v2/operator/executor" batchv1 "k8s.io/api/batch/v1" @@ -16,16 +17,21 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) -const restorePath = "/restore" +const ( + restorePath = "/restore" + _dataDirName = "k8up-dir" +) type RestoreExecutor struct { executor.Generic + restore *k8upv1.Restore } // NewRestoreExecutor will return a new executor for Restore jobs. func NewRestoreExecutor(config job.Config) *RestoreExecutor { return &RestoreExecutor{ Generic: executor.Generic{Config: config}, + restore: config.Obj.(*k8upv1.Restore), } } @@ -72,10 +78,10 @@ func (r *RestoreExecutor) createRestoreObject(ctx context.Context, restore *k8up restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) volumes, volumeMounts := r.volumeConfig(restore) - batchJob.Spec.Template.Spec.Volumes = volumes - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts + batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append(volumeMounts, r.attachMoreVolumeMounts()...) 
- args, argsErr := r.args(restore) + args, argsErr := r.setupArgs(restore) batchJob.Spec.Template.Spec.Containers[0].Args = args return argsErr }) @@ -87,9 +93,10 @@ func (r *RestoreExecutor) jobName() string { return k8upv1.RestoreType.String() + "-" + r.Obj.GetName() } -func (r *RestoreExecutor) args(restore *k8upv1.Restore) ([]string, error) { - args := []string{"-restore"} +func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { + args := r.appendOptionsArgs() + args = append(args, "-restore") if len(restore.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(restore.Spec.Tags)...) } @@ -110,6 +117,7 @@ func (r *RestoreExecutor) args(restore *k8upv1.Restore) ([]string, error) { default: return nil, fmt.Errorf("undefined restore method (-restoreType) on '%v/%v'", restore.Namespace, restore.Name) } + return args, nil } @@ -168,3 +176,96 @@ func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Rest return vars.Convert() } + +func (r *RestoreExecutor) appendOptionsArgs() []string { + var args []string + + args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) + + if r.restore.Spec.Backend.Options != nil { + if r.restore.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"--caCert", r.restore.Spec.Backend.Options.CACert}...) + } + if r.restore.Spec.Backend.Options.ClientCert != "" && r.restore.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "--clientCert", + r.restore.Spec.Backend.Options.ClientCert, + "--clientKey", + r.restore.Spec.Backend.Options.ClientKey, + }..., + ) + } + } + + if r.restore.Spec.RestoreMethod.Options != nil { + if r.restore.Spec.RestoreMethod.Options.CACert != "" { + args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}...) 
+ } + if r.restore.Spec.RestoreMethod.Options.ClientCert != "" && r.restore.Spec.RestoreMethod.Options.ClientKey != "" { + args = append( + args, + []string{ + "--restoreClientCert", + r.restore.Spec.RestoreMethod.Options.ClientCert, + "--restoreClientKey", + r.restore.Spec.RestoreMethod.Options.ClientKey, + }..., + ) + } + } + + return args +} + +func (r *RestoreExecutor) attachMoreVolumes() []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if utils.ZeroLen(r.restore.Spec.Volumes) { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*r.restore.Spec.Volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *r.restore.Spec.Volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + moreVolumes = append(moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }) + } + + return moreVolumes +} + +func (r *RestoreExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { + var volumeMount []corev1.VolumeMount + + if r.restore.Spec.Backend.S3 != nil && !utils.ZeroLen(r.restore.Spec.Backend.S3.VolumeMounts) { + volumeMount = *r.restore.Spec.Backend.S3.VolumeMounts + } + if r.restore.Spec.Backend.Rest != nil && !utils.ZeroLen(r.restore.Spec.Backend.Rest.VolumeMounts) { + volumeMount = *r.restore.Spec.Backend.Rest.VolumeMounts + } + + ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + volumeMount = append(volumeMount, ku8pVolumeMount) + + return volumeMount +} diff --git a/operator/restorecontroller/executor_test.go b/operator/restorecontroller/executor_test.go index 
d2abe7f74..1956aaa88 100644 --- a/operator/restorecontroller/executor_test.go +++ b/operator/restorecontroller/executor_test.go @@ -248,7 +248,7 @@ func TestRestore_args(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { e := NewRestoreExecutor(*newConfig()) - args, err := e.args(tt.GivenResource) + args, err := e.setupArgs(tt.GivenResource) require.NoError(t, err) assert.Equal(t, tt.ExpectedArgs, args) diff --git a/operator/utils/utils.go b/operator/utils/utils.go new file mode 100644 index 000000000..fba210a6f --- /dev/null +++ b/operator/utils/utils.go @@ -0,0 +1,23 @@ +package utils + +import ( + "math/rand" + "reflect" + "time" +) + +func RandomStringGenerator(n int) string { + var characters = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + rand.New(rand.NewSource(time.Now().UnixNano())) + b := make([]rune, n) + for i := range b { + b[i] = characters[rand.Intn(len(characters))] + } + return string(b) +} + +func ZeroLen(v interface{}) bool { + return v == nil || + (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) || + (reflect.ValueOf(v).Kind() == reflect.Ptr && !reflect.ValueOf(v).IsNil() && reflect.ValueOf(v).Elem().Len() == 0) +} diff --git a/restic/cfg/config.go b/restic/cfg/config.go index 98e9e89b0..e2101c064 100644 --- a/restic/cfg/config.go +++ b/restic/cfg/config.go @@ -52,6 +52,9 @@ type Configuration struct { RestoreSnap string RestoreType string RestoreFilter string + RestoreCACert string + RestoreClientCert string + RestoreClientKey string VerifyRestore bool RestoreTrimPath bool @@ -75,6 +78,11 @@ type Configuration struct { TargetPods []string SleepDuration time.Duration + + VarDir string + CACert string + ClientCert string + ClientKey string } // Validate ensures a consistent configuration and returns an error should that not be the case diff --git a/restic/cli/archive.go b/restic/cli/archive.go index 609222fcf..04358ff52 100644 --- a/restic/cli/archive.go +++ b/restic/cli/archive.go @@ -1,7 
+1,7 @@ package cli // Archive uploads the last version of each snapshot to S3. -func (r *Restic) Archive(restoreFilter string, verifyRestore bool, tags ArrayOpts) error { +func (r *Restic) Archive(options RestoreOptions, tags ArrayOpts) error { archiveLogger := r.logger.WithName("archive") @@ -15,7 +15,7 @@ func (r *Restic) Archive(restoreFilter string, verifyRestore bool, tags ArrayOpt for _, v := range r.snapshots { PVCname := r.parsePath(v.Paths) archiveLogger.Info("starting archival for", "namespace", v.Hostname, "pvc", PVCname) - err := r.Restore(v.ID, RestoreOptions{RestoreType: S3Restore, RestoreFilter: restoreFilter, Verify: verifyRestore}, nil) + err := r.Restore(v.ID, options, nil) if err != nil { return err } diff --git a/restic/cli/init.go b/restic/cli/init.go index 240a59270..5282b4278 100644 --- a/restic/cli/init.go +++ b/restic/cli/init.go @@ -9,9 +9,14 @@ import ( "github.com/k8up-io/k8up/v2/restic/logging" ) -// Init initialises a repository, checks if the repositor exists and will -// initialise it if not. It's save to call this every time. +// Init initialises a repository, checks if the repository exists and will +// initialise it if not. It's safe to call this every time. 
func (r *Restic) Init() error { + if r.clientCert != (clientCert{}) { + if err := generatePemFile(r.clientCert.cert, r.clientCert.key, r.clientCert.pem); err != nil { + return err + } + } initLogger := r.logger.WithName("RepoInit") resticLogger := initLogger.WithName("restic") diff --git a/restic/cli/restic.go b/restic/cli/restic.go index 91ddd1030..12aa8bf26 100644 --- a/restic/cli/restic.go +++ b/restic/cli/restic.go @@ -2,7 +2,9 @@ package cli import ( "context" + "github.com/k8up-io/k8up/v2/operator/utils" "path" + "path/filepath" "strings" "github.com/go-logr/logr" @@ -40,6 +42,15 @@ type Restic struct { // globalFlags are applied to all invocations of restic globalFlags Flags statsHandler StatsHandler + + caCert string + clientCert clientCert +} + +type clientCert struct { + cert string + key string + pem string } // New returns a new Restic reference @@ -52,11 +63,31 @@ func New(ctx context.Context, logger logr.Logger, statsHandler StatsHandler) *Re globalFlags.AddFlag("--option", options...) 
} + var caCert string + if cfg.Config.CACert != "" { + caCert = cfg.Config.CACert + globalFlags.AddFlag("--cacert", cfg.Config.CACert) + } + var cc clientCert + if cfg.Config.ClientCert != "" && cfg.Config.ClientKey != "" { + var pemFileName strings.Builder + pemFileName.WriteString("restic.repo.") + pemFileName.WriteString(utils.RandomStringGenerator(10)) + pemFileName.WriteString(".pem") + + cc.cert = cfg.Config.ClientCert + cc.key = cfg.Config.ClientKey + cc.pem = filepath.Join(cfg.Config.VarDir, pemFileName.String()) + globalFlags.AddFlag("--tls-client-cert", cc.pem) + } + return &Restic{ logger: logger, resticPath: cfg.Config.ResticBin, ctx: ctx, bucket: path.Base(cfg.Config.ResticRepository), + caCert: caCert, + clientCert: cc, globalFlags: globalFlags, statsHandler: statsHandler, } diff --git a/restic/cli/restore.go b/restic/cli/restore.go index 912bab429..fca34b184 100644 --- a/restic/cli/restore.go +++ b/restic/cli/restore.go @@ -46,6 +46,13 @@ type S3Bucket struct { Endpoint string AccessKey string SecretKey string + Cert S3Cert +} + +type S3Cert struct { + CACert string + ClientCert string + ClientKey string } type fileNode struct { @@ -71,7 +78,7 @@ func (r *Restic) Restore(snapshotID string, options RestoreOptions, tags ArrayOp if len(tags) > 0 { restorelogger.Info("loading snapshots", "tags", tags.String) } else { - restorelogger.Info("loading all snapshots from repositoy") + restorelogger.Info("loading all snapshots from repository") } err := r.Snapshots(tags) @@ -96,7 +103,7 @@ func (r *Restic) Restore(snapshotID string, options RestoreOptions, tags ArrayOp case S3Restore: stats = &RestoreStats{} - err = r.s3Restore(restorelogger, latestSnap, stats) + err = r.s3Restore(restorelogger, options.S3Destination, latestSnap, stats) default: err = fmt.Errorf("no valid restore type") } @@ -287,7 +294,7 @@ func (r *Restic) parsePath(paths []string) string { return path.Base(paths[len(paths)-1]) } -func (r *Restic) s3Restore(log logr.Logger, snapshot 
dto.Snapshot, stats *RestoreStats) error { +func (r *Restic) s3Restore(log logr.Logger, s3Options S3Bucket, snapshot dto.Snapshot, stats *RestoreStats) error { log.Info("S3 chosen as restore destination") cleanupCtx, cleanup := context.WithCancel(r.ctx) defer cleanup() @@ -296,10 +303,10 @@ func (r *Restic) s3Restore(log logr.Logger, snapshot dto.Snapshot, stats *Restor PVCName := r.parsePath(snapshot.Paths) fileName := fmt.Sprintf("backup-%v-%v-%v.tar.gz", snapshot.Hostname, PVCName, snapDate) - stats.RestoreLocation = fmt.Sprintf("%s/%s", cfg.Config.RestoreS3Endpoint, fileName) + stats.RestoreLocation = fmt.Sprintf("%s/%s", s3Options.Endpoint, fileName) stats.SnapshotID = snapshot.ID - s3TransmissionErrorChannel, s3writer, err := r.s3Connect(r.ctx, fileName) + s3TransmissionErrorChannel, s3writer, err := r.s3Connect(r.ctx, s3Options, fileName) if err != nil { return err } @@ -311,12 +318,16 @@ func (r *Restic) s3Restore(log logr.Logger, snapshot dto.Snapshot, stats *Restor } }(cleanupCtx, log, s3writer) - err = r.s3Transmission(log, stats, s3writer) - if err != nil { - return err - } + go func(log logr.Logger, stats *RestoreStats, s3writer *io.PipeWriter) { + err = r.s3Transmission(log, stats, s3writer) + if err != nil { + s3TransmissionErrorChannel <- err + return + } + + cleanup() + }(log, stats, s3writer) - cleanup() return <-s3TransmissionErrorChannel } @@ -374,8 +385,13 @@ func (r *Restic) doRestore(log logr.Logger, latestSnap dto.Snapshot, snapRoot st cmd.Run() } -func (r *Restic) s3Connect(ctx context.Context, fileName string) (chan error, *io.PipeWriter, error) { - s3Client := s3.New(cfg.Config.RestoreS3Endpoint, cfg.Config.RestoreS3AccessKey, cfg.Config.RestoreS3SecretKey) +func (r *Restic) s3Connect(ctx context.Context, s3Options S3Bucket, fileName string) (chan error, *io.PipeWriter, error) { + s3Client := s3.New( + s3Options.Endpoint, + s3Options.AccessKey, + s3Options.SecretKey, + s3.Cert(s3Options.Cert), + ) err := s3Client.Connect(ctx) if err != 
nil { return nil, nil, err diff --git a/restic/cli/utils.go b/restic/cli/utils.go new file mode 100644 index 000000000..b4c1eaf9d --- /dev/null +++ b/restic/cli/utils.go @@ -0,0 +1,38 @@ +package cli + +import ( + "io" + "os" +) + +func generatePemFile(clientCert string, clientKey string, dest string) error { + certIn, err := os.Open(clientCert) + if err != nil { + return err + } + defer certIn.Close() + + tlsIn, err := os.Open(clientKey) + if err != nil { + return err + } + defer tlsIn.Close() + + out, err := os.OpenFile(dest, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, certIn) + if err != nil { + return err + } + + _, err = io.Copy(out, tlsIn) + if err != nil { + return err + } + + return nil +} diff --git a/restic/s3/client.go b/restic/s3/client.go index bec7dd0ce..d83a86509 100644 --- a/restic/s3/client.go +++ b/restic/s3/client.go @@ -2,13 +2,16 @@ package s3 import ( "context" + "crypto/tls" + "crypto/x509" "fmt" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" "io" + "net/http" "net/url" + "os" "strings" - - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" ) // Client wraps the minio s3 client @@ -18,6 +21,13 @@ type Client struct { SecretAccessKey string minioClient *minio.Client bucket string + cert Cert +} + +type Cert struct { + CACert string + ClientCert string + ClientKey string } type UploadObject struct { @@ -26,11 +36,12 @@ type UploadObject struct { } // New returns a new Client -func New(endpoint, accessKeyID, secretAccessKey string) *Client { +func New(endpoint, accessKeyID, secretAccessKey string, cert Cert) *Client { return &Client{ Endpoint: endpoint, AccessKeyID: accessKeyID, SecretAccessKey: secretAccessKey, + cert: cert, } } @@ -50,11 +61,36 @@ func (c *Client) Connect(ctx context.Context) error { return fmt.Errorf("endpoint '%v' has wrong scheme '%s' (should be 'http' or 'https')", c.Endpoint, u.Scheme) } + 
var transportTlsConfig = &tls.Config{} + if c.cert.CACert != "" { + caCert, err := os.ReadFile(c.cert.CACert) + if err != nil { + return err + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + transportTlsConfig.RootCAs = caCertPool + } + if c.cert.ClientCert != "" && c.cert.ClientKey != "" { + clientCert, err := tls.LoadX509KeyPair(c.cert.ClientCert, c.cert.ClientKey) + if err != nil { + return err + } + + transportTlsConfig.Certificates = []tls.Certificate{clientCert} + } + + var TransportRoundTripper http.RoundTripper = &http.Transport{ + TLSClientConfig: transportTlsConfig, + } + c.bucket = strings.Replace(u.Path, "/", "", 1) c.Endpoint = u.Host mc, err := minio.New(c.Endpoint, &minio.Options{ - Creds: credentials.NewStaticV2(c.AccessKeyID, c.SecretAccessKey, ""), - Secure: ssl, + Creds: credentials.NewStaticV2(c.AccessKeyID, c.SecretAccessKey, ""), + Secure: ssl, + Transport: TransportRoundTripper, }) c.minioClient = mc From a0f78d7b515cf8605e565ad3c92e236592962033 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:01:14 +0330 Subject: [PATCH 06/38] [ADD] Adding container volumes when they are mounting Signed-off-by: poyaz --- .gitignore | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 05d0e2dde..0becf7a1e 100644 --- a/.gitignore +++ b/.gitignore @@ -44,4 +44,9 @@ e2e/debug .cr-index/ # Vagrant -.vagrant/ \ No newline at end of file +.vagrant/ + +# Container volumes mount +.config/ +.kube/ +.npm/ From c42e748d79268e74af71974f7ce18f4ac91a315c Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:02:47 +0330 Subject: [PATCH 07/38] [UPDATE] Generating new crd according to adding VolumeMounts to BackendSpec and RestoreMethodSpec Also these changes appends: - Running linter - Fixing check null pointer error if BackendSpec or Volume of Spec is null - Fixing check add duplicate VolumeMount in archive and restore API - Refactoring setupArgs Signed-off-by: poyaz --- 
operator/archivecontroller/executor.go | 123 ++++++++++++------- operator/backupcontroller/backup_utils.go | 84 +++++++------ operator/checkcontroller/executor.go | 107 +++++++++-------- operator/prunecontroller/executor.go | 116 ++++++++++-------- operator/restorecontroller/executor.go | 137 +++++++++++++++------- 5 files changed, 347 insertions(+), 220 deletions(-) diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index cec8de105..02309048e 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -2,13 +2,15 @@ package archivecontroller import ( "context" - "github.com/k8up-io/k8up/v2/operator/executor" - "github.com/k8up-io/k8up/v2/operator/utils" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -46,25 +48,33 @@ func (a *ArchiveExecutor) Execute(ctx context.Context) error { batchJob.Name = a.jobName() batchJob.Namespace = a.archive.Namespace - _, err := controllerutil.CreateOrUpdate(ctx, a.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, a.archive, a.Config) - if mutateErr != nil { - return mutateErr - } + _, err := controllerutil.CreateOrUpdate( + ctx, a.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, a.archive, a.Config) + if mutateErr != nil { + return mutateErr + } - batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) - a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = 
a.attachMoreVolumes() + batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) + a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() - args, argsErr := a.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + args, argsErr := a.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }) + return argsErr + }, + ) if err != nil { log.Error(err, "could not create job") - a.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) + a.SetConditionFalseWithMessage( + ctx, + k8upv1.ConditionReady, + k8upv1.ReasonCreationFailed, + "could not create job: %v", + err, + ) return err } @@ -77,19 +87,19 @@ func (a *ArchiveExecutor) jobName() string { } func (a *ArchiveExecutor) setupArgs() ([]string, error) { - args := a.appendOptionsArgs() - - args = append(args, []string{"-archive", "-restoreType", "s3"}...) - if a.archive.Spec.RestoreSpec != nil { - if len(a.archive.Spec.RestoreSpec.Tags) > 0 { - args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) - } + args := []string{"-varDir", cfg.Config.PodVarDir, "-archive", "-restoreType", "s3"} + if a.archive.Spec.RestoreSpec != nil && len(a.archive.Spec.RestoreSpec.Tags) > 0 { + args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) } + args = append(args, a.appendOptionsArgs()...) 
return args, nil } -func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Archive) []corev1.EnvVar { +func (a *ArchiveExecutor) setupEnvVars( + ctx context.Context, + archive *k8upv1.Archive, +) []corev1.EnvVar { log := controllerruntime.LoggerFrom(ctx) vars := executor.NewEnvVarConverter() @@ -119,7 +129,14 @@ func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Arch err := vars.Merge(executor.DefaultEnv(a.Obj.GetNamespace())) if err != nil { - log.Error(err, "error while merging the environment variables", "name", a.Obj.GetName(), "namespace", a.Obj.GetNamespace()) + log.Error( + err, + "error while merging the environment variables", + "name", + a.Obj.GetName(), + "namespace", + a.Obj.GetNamespace(), + ) } return vars.Convert() @@ -132,36 +149,37 @@ func (a *ArchiveExecutor) cleanupOldArchives(ctx context.Context, archive *k8upv func (a *ArchiveExecutor) appendOptionsArgs() []string { var args []string - args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) - - if a.archive.Spec.Backend.Options != nil { + if a.archive.Spec.Backend != nil && a.archive.Spec.Backend.Options != nil { if a.archive.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"--caCert", a.archive.Spec.Backend.Options.CACert}...) + args = append(args, []string{"-caCert", a.archive.Spec.Backend.Options.CACert}...) } if a.archive.Spec.Backend.Options.ClientCert != "" && a.archive.Spec.Backend.Options.ClientKey != "" { args = append( args, []string{ - "--clientCert", + "-clientCert", a.archive.Spec.Backend.Options.ClientCert, - "--clientKey", + "-clientKey", a.archive.Spec.Backend.Options.ClientKey, }..., ) } } - if a.archive.Spec.RestoreMethod.Options != nil { + if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreMethod.Options != nil { if a.archive.Spec.RestoreMethod.Options.CACert != "" { - args = append(args, []string{"--restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}...) 
+ args = append( + args, + []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}..., + ) } if a.archive.Spec.RestoreMethod.Options.ClientCert != "" && a.archive.Spec.RestoreMethod.Options.ClientKey != "" { args = append( args, []string{ - "--restoreClientCert", + "-restoreClientCert", a.archive.Spec.RestoreMethod.Options.ClientCert, - "--restoreClientKey", + "-restoreClientKey", a.archive.Spec.RestoreMethod.Options.ClientKey, }..., ) @@ -197,10 +215,12 @@ func (a *ArchiveExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append(moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }) + moreVolumes = append( + moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }, + ) } return moreVolumes @@ -209,11 +229,28 @@ func (a *ArchiveExecutor) attachMoreVolumes() []corev1.Volume { func (a *ArchiveExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { var volumeMount []corev1.VolumeMount - if a.archive.Spec.Backend.S3 != nil && !utils.ZeroLen(a.archive.Spec.Backend.S3.VolumeMounts) { - volumeMount = *a.archive.Spec.Backend.S3.VolumeMounts + if a.archive.Spec.Backend != nil && !utils.ZeroLen(a.archive.Spec.Backend.VolumeMounts) { + volumeMount = append(volumeMount, *a.archive.Spec.Backend.VolumeMounts...) 
} - if a.archive.Spec.Backend.Rest != nil && !utils.ZeroLen(a.archive.Spec.Backend.Rest.VolumeMounts) { - volumeMount = *a.archive.Spec.Backend.Rest.VolumeMounts + if a.archive.Spec.RestoreMethod != nil && !utils.ZeroLen(a.archive.Spec.RestoreMethod.VolumeMounts) { + for _, v1 := range *a.archive.Spec.RestoreMethod.VolumeMounts { + vm1 := v1 + var isExist bool + + for _, v2 := range volumeMount { + vm2 := v2 + if vm1.Name == vm2.Name && vm1.MountPath == vm2.MountPath { + isExist = true + break + } + } + + if isExist { + continue + } + + volumeMount = append(volumeMount, vm1) + } } ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index 1387a9ad3..796ee4527 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -3,14 +3,16 @@ package backupcontroller import ( "context" "fmt" - "github.com/k8up-io/k8up/v2/operator/executor" - "github.com/k8up-io/k8up/v2/operator/utils" + "path" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "path" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" + "github.com/k8up-io/k8up/v2/operator/cfg" ) @@ -32,7 +34,10 @@ func (b *BackupExecutor) newVolumeMounts(claims []corev1.Volume) []corev1.Volume return mounts } -func containsAccessMode(s []corev1.PersistentVolumeAccessMode, e corev1.PersistentVolumeAccessMode) bool { +func containsAccessMode( + s []corev1.PersistentVolumeAccessMode, + e corev1.PersistentVolumeAccessMode, +) bool { for _, a := range s { if a == e { return true @@ -45,9 +50,11 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err sa := &corev1.ServiceAccount{} sa.Name = cfg.Config.ServiceAccount sa.Namespace = b.backup.Namespace - _, err := 
controllerruntime.CreateOrUpdate(ctx, b.Config.Client, sa, func() error { - return nil - }) + _, err := controllerruntime.CreateOrUpdate( + ctx, b.Config.Client, sa, func() error { + return nil + }, + ) if err != nil { return err } @@ -58,30 +65,32 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err roleBinding := &rbacv1.RoleBinding{} roleBinding.Name = cfg.Config.PodExecRoleName + "-namespaced" roleBinding.Namespace = b.backup.Namespace - _, err = controllerruntime.CreateOrUpdate(ctx, b.Config.Client, roleBinding, func() error { - roleBinding.Subjects = []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: b.backup.Namespace, - Name: sa.Name, - }, - } - roleBinding.RoleRef = rbacv1.RoleRef{ - Kind: "ClusterRole", - Name: "k8up-executor", - APIGroup: "rbac.authorization.k8s.io", - } - return nil - }) + _, err = controllerruntime.CreateOrUpdate( + ctx, b.Config.Client, roleBinding, func() error { + roleBinding.Subjects = []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Namespace: b.backup.Namespace, + Name: sa.Name, + }, + } + roleBinding.RoleRef = rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "k8up-executor", + APIGroup: "rbac.authorization.k8s.io", + } + return nil + }, + ) return err } func (b *BackupExecutor) setupArgs() ([]string, error) { - args := b.appendOptionsArgs() - + args := []string{"--varDir", cfg.Config.PodVarDir} if len(b.backup.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) } + args = append(args, b.appendOptionsArgs()...) 
return args, nil } @@ -136,10 +145,12 @@ func (b *BackupExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append(moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }) + moreVolumes = append( + moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }, + ) } return moreVolumes @@ -148,11 +159,8 @@ func (b *BackupExecutor) attachMoreVolumes() []corev1.Volume { func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { var volumeMount []corev1.VolumeMount - if b.backup.Spec.Backend.S3 != nil && !utils.ZeroLen(b.backup.Spec.Backend.S3.VolumeMounts) { - volumeMount = *b.backup.Spec.Backend.S3.VolumeMounts - } - if b.backup.Spec.Backend.Rest != nil && !utils.ZeroLen(b.backup.Spec.Backend.Rest.VolumeMounts) { - volumeMount = *b.backup.Spec.Backend.Rest.VolumeMounts + if b.backup.Spec.Backend != nil && !utils.ZeroLen(b.backup.Spec.Backend.VolumeMounts) { + volumeMount = *b.backup.Spec.Backend.VolumeMounts } ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} @@ -164,22 +172,20 @@ func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { func (b *BackupExecutor) appendOptionsArgs() []string { var args []string - args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) - - if b.backup.Spec.Backend.Options == nil { + if !(b.backup.Spec.Backend != nil && b.backup.Spec.Backend.Options != nil) { return args } if b.backup.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"--caCert", b.backup.Spec.Backend.Options.CACert}...) + args = append(args, []string{"-caCert", b.backup.Spec.Backend.Options.CACert}...) 
} if b.backup.Spec.Backend.Options.ClientCert != "" && b.backup.Spec.Backend.Options.ClientKey != "" { args = append( args, []string{ - "--clientCert", + "-clientCert", b.backup.Spec.Backend.Options.ClientCert, - "--clientKey", + "-clientKey", b.backup.Spec.Backend.Options.ClientKey, }..., ) diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index efc57fc5e..a97e10f1d 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -2,13 +2,15 @@ package checkcontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/utils" - "github.com/k8up-io/k8up/v2/operator/executor" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" + "github.com/k8up-io/k8up/v2/operator/executor" + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -46,25 +48,33 @@ func (c *CheckExecutor) Execute(ctx context.Context) error { batchJob.Name = c.jobName() batchJob.Namespace = c.check.Namespace - _, err := controllerruntime.CreateOrUpdate(ctx, c.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, c.check, c.Config) - if mutateErr != nil { - return mutateErr - } + _, err := controllerruntime.CreateOrUpdate( + ctx, c.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, c.check, c.Config) + if mutateErr != nil { + return mutateErr + } - batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) - c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() - batchJob.Labels[job.K8upExclusive] = "true" + batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) + c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + 
batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() + batchJob.Labels[job.K8upExclusive] = "true" - args, argsErr := c.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + args, argsErr := c.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }) + return argsErr + }, + ) if err != nil { - c.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) + c.SetConditionFalseWithMessage( + ctx, + k8upv1.ConditionReady, + k8upv1.ReasonCreationFailed, + "could not create job: %v", + err, + ) return err } c.SetStarted(ctx, "the job '%v/%v' was created", batchJob.Namespace, batchJob.Name) @@ -75,9 +85,9 @@ func (c *CheckExecutor) jobName() string { return k8upv1.CheckType.String() + "-" + c.check.Name } -func (r *CheckExecutor) setupArgs() ([]string, error) { - args := r.appendOptionsArgs() - args = append(args, "-check") +func (c *CheckExecutor) setupArgs() ([]string, error) { + args := []string{"-varDir", cfg.Config.PodVarDir, "-check"} + args = append(args, c.appendOptionsArgs()...) 
return args, nil } @@ -99,7 +109,14 @@ func (c *CheckExecutor) setupEnvVars(ctx context.Context) []corev1.EnvVar { err := vars.Merge(executor.DefaultEnv(c.Obj.GetNamespace())) if err != nil { - log.Error(err, "error while merging the environment variables", "name", c.Obj.GetName(), "namespace", c.Obj.GetNamespace()) + log.Error( + err, + "error while merging the environment variables", + "name", + c.Obj.GetName(), + "namespace", + c.Obj.GetNamespace(), + ) } return vars.Convert() @@ -111,24 +128,23 @@ func (c *CheckExecutor) cleanupOldChecks(ctx context.Context, check *k8upv1.Chec func (c *CheckExecutor) appendOptionsArgs() []string { var args []string + if !(c.check.Spec.Backend != nil && c.check.Spec.Backend.Options != nil) { + return args + } - args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) - - if c.check.Spec.Backend.Options != nil { - if c.check.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"--caCert", c.check.Spec.Backend.Options.CACert}...) - } - if c.check.Spec.Backend.Options.ClientCert != "" && c.check.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "--clientCert", - c.check.Spec.Backend.Options.ClientCert, - "--clientKey", - c.check.Spec.Backend.Options.ClientKey, - }..., - ) - } + if c.check.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"-caCert", c.check.Spec.Backend.Options.CACert}...) 
+ } + if c.check.Spec.Backend.Options.ClientCert != "" && c.check.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "-clientCert", + c.check.Spec.Backend.Options.ClientCert, + "-clientKey", + c.check.Spec.Backend.Options.ClientKey, + }..., + ) } return args @@ -160,10 +176,12 @@ func (c *CheckExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append(moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }) + moreVolumes = append( + moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }, + ) } return moreVolumes @@ -172,11 +190,8 @@ func (c *CheckExecutor) attachMoreVolumes() []corev1.Volume { func (c *CheckExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { var volumeMount []corev1.VolumeMount - if c.check.Spec.Backend.S3 != nil && !utils.ZeroLen(c.check.Spec.Backend.S3.VolumeMounts) { - volumeMount = *c.check.Spec.Backend.S3.VolumeMounts - } - if c.check.Spec.Backend.Rest != nil && !utils.ZeroLen(c.check.Spec.Backend.Rest.VolumeMounts) { - volumeMount = *c.check.Spec.Backend.Rest.VolumeMounts + if c.check.Spec.Backend != nil && !utils.ZeroLen(c.check.Spec.Backend.VolumeMounts) { + volumeMount = *c.check.Spec.Backend.VolumeMounts } ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index 91d632901..5caded3cc 100644 --- a/operator/prunecontroller/executor.go +++ b/operator/prunecontroller/executor.go @@ -2,16 +2,18 @@ package prunecontroller import ( "context" - "github.com/k8up-io/k8up/v2/operator/utils" "strconv" "strings" - "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + 
"github.com/k8up-io/k8up/v2/operator/executor" + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -39,26 +41,34 @@ func (p *PruneExecutor) Execute(ctx context.Context) error { batchJob.Name = p.jobName() batchJob.Namespace = p.prune.Namespace - _, err := controllerutil.CreateOrUpdate(ctx, p.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, p.prune, p.Config) - if mutateErr != nil { - return mutateErr - } - - batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) - batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount - p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() - batchJob.Labels[job.K8upExclusive] = "true" - - args, argsErr := p.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args - - return argsErr - }) + _, err := controllerutil.CreateOrUpdate( + ctx, p.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, p.prune, p.Config) + if mutateErr != nil { + return mutateErr + } + + batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) + batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount + p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() + batchJob.Labels[job.K8upExclusive] = "true" + + args, argsErr := p.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args + + return argsErr + }, + ) if err != nil { - p.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) + p.SetConditionFalseWithMessage( + ctx, + 
k8upv1.ConditionReady, + k8upv1.ReasonCreationFailed, + "could not create job: %v", + err, + ) return err } @@ -71,12 +81,11 @@ func (p *PruneExecutor) jobName() string { } func (p *PruneExecutor) setupArgs() ([]string, error) { - args := p.appendOptionsArgs() - - args = append(args, "-prune") + args := []string{"-varDir", cfg.Config.PodVarDir, "-prune"} if len(p.prune.Spec.Retention.Tags) > 0 { args = append(args, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) } + args = append(args, p.appendOptionsArgs()...) return args, nil } @@ -137,7 +146,14 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ err := vars.Merge(executor.DefaultEnv(p.Obj.GetNamespace())) if err != nil { - log.Error(err, "error while merging the environment variables", "name", p.Obj.GetName(), "namespace", p.Obj.GetNamespace()) + log.Error( + err, + "error while merging the environment variables", + "name", + p.Obj.GetName(), + "namespace", + p.Obj.GetNamespace(), + ) } return vars.Convert() @@ -145,24 +161,23 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ func (p *PruneExecutor) appendOptionsArgs() []string { var args []string + if !(p.prune.Spec.Backend != nil && p.prune.Spec.Backend.Options != nil) { + return args + } - args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) - - if p.prune.Spec.Backend.Options != nil { - if p.prune.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"--caCert", p.prune.Spec.Backend.Options.CACert}...) - } - if p.prune.Spec.Backend.Options.ClientCert != "" && p.prune.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "--clientCert", - p.prune.Spec.Backend.Options.ClientCert, - "--clientKey", - p.prune.Spec.Backend.Options.ClientKey, - }..., - ) - } + if p.prune.Spec.Backend.Options.CACert != "" { + args = append(args, []string{"-caCert", p.prune.Spec.Backend.Options.CACert}...) 
+ } + if p.prune.Spec.Backend.Options.ClientCert != "" && p.prune.Spec.Backend.Options.ClientKey != "" { + args = append( + args, + []string{ + "-clientCert", + p.prune.Spec.Backend.Options.ClientCert, + "-clientKey", + p.prune.Spec.Backend.Options.ClientKey, + }..., + ) } return args @@ -194,10 +209,12 @@ func (p *PruneExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append(moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }) + moreVolumes = append( + moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }, + ) } return moreVolumes @@ -206,11 +223,8 @@ func (p *PruneExecutor) attachMoreVolumes() []corev1.Volume { func (p *PruneExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { var volumeMount []corev1.VolumeMount - if p.prune.Spec.Backend.S3 != nil && !utils.ZeroLen(p.prune.Spec.Backend.S3.VolumeMounts) { - volumeMount = *p.prune.Spec.Backend.S3.VolumeMounts - } - if p.prune.Spec.Backend.Rest != nil && !utils.ZeroLen(p.prune.Spec.Backend.Rest.VolumeMounts) { - volumeMount = *p.prune.Spec.Backend.Rest.VolumeMounts + if p.prune.Spec.Backend != nil && !utils.ZeroLen(p.prune.Spec.Backend.VolumeMounts) { + volumeMount = *p.prune.Spec.Backend.VolumeMounts } ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} diff --git a/operator/restorecontroller/executor.go b/operator/restorecontroller/executor.go index f480adf9c..c86c29f14 100644 --- a/operator/restorecontroller/executor.go +++ b/operator/restorecontroller/executor.go @@ -4,14 +4,16 @@ import ( "context" "errors" "fmt" + "github.com/k8up-io/k8up/v2/operator/utils" - "github.com/k8up-io/k8up/v2/operator/executor" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "github.com/k8up-io/k8up/v2/operator/executor" + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 
"github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -51,7 +53,13 @@ func (r *RestoreExecutor) Execute(ctx context.Context) error { restoreJob, err := r.createRestoreObject(ctx, restore) if err != nil { log.Error(err, "unable to create or update restore object") - r.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "unable to create restore object: %v", err) + r.SetConditionFalseWithMessage( + ctx, + k8upv1.ConditionReady, + k8upv1.ReasonCreationFailed, + "unable to create restore object: %v", + err, + ) return nil } @@ -64,27 +72,35 @@ func (r *RestoreExecutor) cleanupOldRestores(ctx context.Context, restore *k8upv r.CleanupOldResources(ctx, &k8upv1.RestoreList{}, restore.Namespace, restore) } -func (r *RestoreExecutor) createRestoreObject(ctx context.Context, restore *k8upv1.Restore) (*batchv1.Job, error) { +func (r *RestoreExecutor) createRestoreObject( + ctx context.Context, + restore *k8upv1.Restore, +) (*batchv1.Job, error) { batchJob := &batchv1.Job{} batchJob.Name = r.jobName() batchJob.Namespace = restore.Namespace - _, err := controllerutil.CreateOrUpdate(ctx, r.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, restore, r.Config) - if mutateErr != nil { - return mutateErr - } - batchJob.Labels[job.K8upExclusive] = "true" - batchJob.Spec.Template.Spec.Containers[0].Env = r.setupEnvVars(ctx, restore) - restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - - volumes, volumeMounts := r.volumeConfig(restore) - batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append(volumeMounts, r.attachMoreVolumeMounts()...) 
+ _, err := controllerutil.CreateOrUpdate( + ctx, r.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, restore, r.Config) + if mutateErr != nil { + return mutateErr + } + batchJob.Labels[job.K8upExclusive] = "true" + batchJob.Spec.Template.Spec.Containers[0].Env = r.setupEnvVars(ctx, restore) + restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + + volumes, volumeMounts := r.volumeConfig(restore) + batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append( + volumeMounts, + r.attachMoreVolumeMounts()..., + ) - args, argsErr := r.setupArgs(restore) - batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }) + args, argsErr := r.setupArgs(restore) + batchJob.Spec.Template.Spec.Containers[0].Args = args + return argsErr + }, + ) return batchJob, err } @@ -94,9 +110,7 @@ func (r *RestoreExecutor) jobName() string { } func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { - args := r.appendOptionsArgs() - - args = append(args, "-restore") + args := []string{"-varDir", cfg.Config.PodVarDir, "-restore"} if len(restore.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(restore.Spec.Tags)...) } @@ -115,22 +129,33 @@ func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { case restore.Spec.RestoreMethod.S3 != nil: args = append(args, "-restoreType", "s3") default: - return nil, fmt.Errorf("undefined restore method (-restoreType) on '%v/%v'", restore.Namespace, restore.Name) + return nil, fmt.Errorf( + "undefined restore method (-restoreType) on '%v/%v'", + restore.Namespace, + restore.Name, + ) } + args = append(args, r.appendOptionsArgs()...) 
+ return args, nil } -func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ([]corev1.Volume, []corev1.VolumeMount) { +func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ( + []corev1.Volume, + []corev1.VolumeMount, +) { volumes := make([]corev1.Volume, 0) if restore.Spec.RestoreMethod.S3 == nil { - volumes = append(volumes, + volumes = append( + volumes, corev1.Volume{ Name: restore.Spec.RestoreMethod.Folder.ClaimName, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: restore.Spec.RestoreMethod.Folder.PersistentVolumeClaimVolumeSource, }, - }) + }, + ) } mounts := make([]corev1.VolumeMount, 0) @@ -145,7 +170,10 @@ func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ([]corev1.Volume return volumes, mounts } -func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Restore) []corev1.EnvVar { +func (r *RestoreExecutor) setupEnvVars( + ctx context.Context, + restore *k8upv1.Restore, +) []corev1.EnvVar { log := controllerruntime.LoggerFrom(ctx) vars := executor.NewEnvVarConverter() @@ -171,7 +199,14 @@ func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Rest err := vars.Merge(executor.DefaultEnv(r.Obj.GetNamespace())) if err != nil { - log.Error(err, "error while merging the environment variables", "name", r.Obj.GetName(), "namespace", r.Obj.GetNamespace()) + log.Error( + err, + "error while merging the environment variables", + "name", + r.Obj.GetName(), + "namespace", + r.Obj.GetNamespace(), + ) } return vars.Convert() @@ -180,9 +215,7 @@ func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Rest func (r *RestoreExecutor) appendOptionsArgs() []string { var args []string - args = append(args, []string{"--varDir", cfg.Config.PodVarDir}...) 
- - if r.restore.Spec.Backend.Options != nil { + if r.restore.Spec.Backend != nil && r.restore.Spec.Backend.Options != nil { if r.restore.Spec.Backend.Options.CACert != "" { args = append(args, []string{"--caCert", r.restore.Spec.Backend.Options.CACert}...) } @@ -199,9 +232,12 @@ func (r *RestoreExecutor) appendOptionsArgs() []string { } } - if r.restore.Spec.RestoreMethod.Options != nil { + if r.restore.Spec.RestoreMethod != nil && r.restore.Spec.RestoreMethod.Options != nil { if r.restore.Spec.RestoreMethod.Options.CACert != "" { - args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}...) + args = append( + args, + []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}..., + ) } if r.restore.Spec.RestoreMethod.Options.ClientCert != "" && r.restore.Spec.RestoreMethod.Options.ClientKey != "" { args = append( @@ -245,10 +281,12 @@ func (r *RestoreExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append(moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }) + moreVolumes = append( + moreVolumes, corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + }, + ) } return moreVolumes @@ -257,11 +295,28 @@ func (r *RestoreExecutor) attachMoreVolumes() []corev1.Volume { func (r *RestoreExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { var volumeMount []corev1.VolumeMount - if r.restore.Spec.Backend.S3 != nil && !utils.ZeroLen(r.restore.Spec.Backend.S3.VolumeMounts) { - volumeMount = *r.restore.Spec.Backend.S3.VolumeMounts + if r.restore.Spec.Backend != nil && !utils.ZeroLen(r.restore.Spec.Backend.VolumeMounts) { + volumeMount = append(volumeMount, *r.restore.Spec.Backend.VolumeMounts...) 
} - if r.restore.Spec.Backend.Rest != nil && !utils.ZeroLen(r.restore.Spec.Backend.Rest.VolumeMounts) { - volumeMount = *r.restore.Spec.Backend.Rest.VolumeMounts + if r.restore.Spec.RestoreMethod != nil && !utils.ZeroLen(r.restore.Spec.RestoreMethod.VolumeMounts) { + for _, v1 := range *r.restore.Spec.RestoreMethod.VolumeMounts { + vm1 := v1 + var isExist bool + + for _, v2 := range volumeMount { + vm2 := v2 + if vm1.Name == vm2.Name && vm1.MountPath == vm2.MountPath { + isExist = true + break + } + } + + if isExist { + continue + } + + volumeMount = append(volumeMount, vm1) + } } ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} From ece84c7d8e75c8f4fe1418b60e4ce95e69d342c4 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:03:46 +0330 Subject: [PATCH 08/38] [UPDATE] Addin VolumeMounts to BackendSpec and RestoreMethod Change: - Removing VolumeMounts from S3Spec and RestServerSpec in BackendSpec. Adding to BackendSpec (File: v1/backend.go) - Adding VolumeMounts to RestoreMethod (File: v1/restore_types.go) Signed-off-by: poyaz --- api/v1/backend.go | 11 ++++++--- api/v1/restore_types.go | 7 +++--- api/v1/zz_generated.deepcopy.go | 44 ++++++++++++++++----------------- 3 files changed, 33 insertions(+), 29 deletions(-) diff --git a/api/v1/backend.go b/api/v1/backend.go index 93a4333ba..dd60cffab 100644 --- a/api/v1/backend.go +++ b/api/v1/backend.go @@ -26,7 +26,8 @@ type ( B2 *B2Spec `json:"b2,omitempty"` Rest *RestServerSpec `json:"rest,omitempty"` - Options *BackendOpts `json:"options,omitempty"` + Options *BackendOpts `json:"options,omitempty"` + VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } // +k8s:deepcopy-gen=false @@ -91,7 +92,11 @@ func IsNil(v interface{}) bool { return v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) } -func addEnvVarFromSecret(vars map[string]*corev1.EnvVarSource, key string, ref *corev1.SecretKeySelector) { +func 
addEnvVarFromSecret( + vars map[string]*corev1.EnvVarSource, + key string, + ref *corev1.SecretKeySelector, +) { if ref != nil { vars[key] = &corev1.EnvVarSource{ SecretKeyRef: ref, @@ -118,7 +123,6 @@ type S3Spec struct { Bucket string `json:"bucket,omitempty"` AccessKeyIDSecretRef *corev1.SecretKeySelector `json:"accessKeyIDSecretRef,omitempty"` SecretAccessKeySecretRef *corev1.SecretKeySelector `json:"secretAccessKeySecretRef,omitempty"` - VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } // EnvVars returns the env vars for this backend. @@ -268,7 +272,6 @@ type RestServerSpec struct { URL string `json:"url,omitempty"` UserSecretRef *corev1.SecretKeySelector `json:"userSecretRef,omitempty"` PasswordSecretReg *corev1.SecretKeySelector `json:"passwordSecretReg,omitempty"` - VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } // EnvVars returns the env vars for this backend. diff --git a/api/v1/restore_types.go b/api/v1/restore_types.go index 081c2f2b2..5830e2b4e 100644 --- a/api/v1/restore_types.go +++ b/api/v1/restore_types.go @@ -35,9 +35,10 @@ type RestoreSpec struct { // RestoreMethod contains how and where the restore should happen // all the settings are mutual exclusive. 
type RestoreMethod struct { - S3 *S3Spec `json:"s3,omitempty"` - Folder *FolderRestore `json:"folder,omitempty"` - Options *RestoreOpts `json:"options,omitempty"` + S3 *S3Spec `json:"s3,omitempty"` + Folder *FolderRestore `json:"folder,omitempty"` + Options *RestoreOpts `json:"options,omitempty"` + VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } type FolderRestore struct { diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 071e11a83..3eeab0fa8 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -215,6 +215,17 @@ func (in *Backend) DeepCopyInto(out *Backend) { *out = new(BackendOpts) **out = **in } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = new([]corev1.VolumeMount) + if **in != nil { + in, out := *in, *out + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend. @@ -804,17 +815,6 @@ func (in *RestServerSpec) DeepCopyInto(out *RestServerSpec) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = new([]corev1.VolumeMount) - if **in != nil { - in, out := *in, *out - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestServerSpec. 
@@ -904,6 +904,17 @@ func (in *RestoreMethod) DeepCopyInto(out *RestoreMethod) { *out = new(RestoreOpts) **out = **in } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = new([]corev1.VolumeMount) + if **in != nil { + in, out := *in, *out + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreMethod. @@ -1108,17 +1119,6 @@ func (in *S3Spec) DeepCopyInto(out *S3Spec) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = new([]corev1.VolumeMount) - if **in != nil { - in, out := *in, *out - *out = make([]corev1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. 
From a56d46520b27a8fbd67b7821a48cb176d59796bb Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:03:58 +0330 Subject: [PATCH 09/38] [UPDATE] Generating new crd according to adding VolumeMounts to BackendSpec and RestoreMethodSpec Signed-off-by: poyaz --- .../v1/k8up.io_archives.yaml | 210 ++-- .../v1/k8up.io_backups.yaml | 126 +-- .../v1/k8up.io_checks.yaml | 126 +-- .../v1/k8up.io_prunes.yaml | 126 +-- .../v1/k8up.io_restores.yaml | 210 ++-- .../v1/k8up.io_schedules.yaml | 950 +++++++----------- 6 files changed, 643 insertions(+), 1105 deletions(-) diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml index a03ba65da..c18e7c277 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml @@ -329,48 +329,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -420,48 +378,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -470,6 +386,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. 
Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- @@ -793,49 +751,49 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. 
- type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object snapshot: type: string diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml index 1ea31e5aa..ec94920b1 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml @@ -336,48 +336,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -427,48 +385,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. 
- properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -477,6 +393,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml index 46d761782..0eeb7c07e 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml @@ -331,48 +331,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -422,48 +380,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -472,6 +388,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. 
Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml index 1aaf76bc8..d571c79ae 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml @@ -331,48 +331,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. 
- type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -422,48 +380,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -472,6 +388,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml index f6e8d7684..2c3b55099 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml @@ -331,48 +331,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -422,48 +380,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. 
- properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -472,6 +388,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object failedJobsHistoryLimit: description: |- @@ -795,49 +753,49 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. 
- type: string - required: - - mountPath - - name - type: object - type: array type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object snapshot: type: string diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml index 4e3a98315..7214682ed 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml @@ -322,48 +322,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. 
Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -413,48 +371,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. 
- Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -463,6 +379,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object concurrentRunsAllowed: type: boolean @@ -788,49 +746,49 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. 
+ type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object schedule: description: ScheduleDefinition is the actual cron-type expression @@ -1286,48 +1244,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -1377,48 +1293,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -1427,6 +1301,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. 
Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object backup: description: BackupSchedule manages schedules for the backup service @@ -1711,48 +1627,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. 
- type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -1802,48 +1676,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. 
- type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -1852,6 +1684,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object concurrentRunsAllowed: type: boolean @@ -2570,48 +2444,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. 
- When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -2648,61 +2480,19 @@ spec: be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? - type: string - optional: - description: Specify whether the Secret or its key - must be defined - type: boolean - required: - - key - type: object - x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. 
- type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic type: object swift: properties: @@ -2711,6 +2501,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. 
+ type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object concurrentRunsAllowed: type: boolean @@ -3604,48 +3436,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. 
- type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -3695,48 +3485,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -3745,6 +3493,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. 
+ When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object concurrentRunsAllowed: type: boolean @@ -4537,48 +4327,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. 
- Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object s3: properties: @@ -4628,48 +4376,6 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. - type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object swift: properties: @@ -4678,6 +4384,48 @@ spec: path: type: string type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. 
Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array type: object concurrentRunsAllowed: type: boolean @@ -5003,49 +4751,49 @@ spec: - key type: object x-kubernetes-map-type: atomic - volumeMounts: - items: - description: VolumeMount describes a mounting of a Volume - within a container. - properties: - mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. - type: string - name: - description: This must match the Name of a Volume. - type: string - readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. 
- type: boolean - subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). - type: string - subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array type: object + volumeMounts: + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + When not set, MountPropagationNone is used. + This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: |- + Expanded path within the volume from which the container's volume should be mounted. + Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + Defaults to "" (volume's root). + SubPathExpr and SubPath are mutually exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array type: object schedule: description: ScheduleDefinition is the actual cron-type expression From 2af7fd68f35e1235041b7944162a3da4c612a6de Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:04:23 +0330 Subject: [PATCH 10/38] [ADD] Adding new resource definitions for e2e test in TLS and mTls mode These definitions contain below: - Adding archive Adding restore Adding backup Adding nginx for use reverse proxy in TLS and mTls mode Adding cert-manager for genrate self-signed issuer Signed-off-by: poyaz --- .../annotated-subject/deployment.yaml | 1 + e2e/definitions/annotated-subject/pod.yaml | 1 + .../archive/s3-mtls-archive-mtls.yaml | 50 +++++++++ .../archive/s3-mtls-archive-tls.yaml | 55 +++++++++ .../archive/s3-tls-archive-mtls.yaml | 55 +++++++++ .../archive/s3-tls-archive-tls.yaml | 46 ++++++++ e2e/definitions/backup/backup-mtls.yaml | 36 ++++++ e2e/definitions/backup/backup-tls.yaml | 34 ++++++ e2e/definitions/backup/backup.yaml | 2 +- e2e/definitions/cert/issure.yaml | 7 ++ e2e/definitions/cert/minio-ca.yaml | 30 +++++ e2e/definitions/cert/minio-mtls.yaml | 36 ++++++ e2e/definitions/cert/minio-tls.yaml | 18 +++ e2e/definitions/check/check-mtls.yaml | 36 ++++++ e2e/definitions/check/check-tls.yaml | 34 ++++++ e2e/definitions/proxy/config.yaml | 89 +++++++++++++++ e2e/definitions/proxy/deployment.yaml | 43 ++++++++ e2e/definitions/proxy/service.yaml | 35 ++++++ .../restore/restore-backupcommand.yaml | 2 +- e2e/definitions/restore/restore-mtls.yaml | 39 +++++++ e2e/definitions/restore/restore-tls.yaml | 37 +++++++ e2e/definitions/restore/restore.yaml | 2 +- .../restore/s3-mtls-restore-mtls.yaml | 50 +++++++++ .../restore/s3-mtls-restore-tls.yaml | 55 +++++++++ .../restore/s3-tls-restore-mtls.yaml | 55 +++++++++ .../restore/s3-tls-restore-tls.yaml | 46 ++++++++ e2e/definitions/subject-dl/deployment.yaml | 104 ++++++++++++++++++ 27 files changed, 995 insertions(+), 3 deletions(-) 
create mode 100644 e2e/definitions/archive/s3-mtls-archive-mtls.yaml create mode 100644 e2e/definitions/archive/s3-mtls-archive-tls.yaml create mode 100644 e2e/definitions/archive/s3-tls-archive-mtls.yaml create mode 100644 e2e/definitions/archive/s3-tls-archive-tls.yaml create mode 100644 e2e/definitions/backup/backup-mtls.yaml create mode 100644 e2e/definitions/backup/backup-tls.yaml create mode 100644 e2e/definitions/cert/issure.yaml create mode 100644 e2e/definitions/cert/minio-ca.yaml create mode 100644 e2e/definitions/cert/minio-mtls.yaml create mode 100644 e2e/definitions/cert/minio-tls.yaml create mode 100644 e2e/definitions/check/check-mtls.yaml create mode 100644 e2e/definitions/check/check-tls.yaml create mode 100644 e2e/definitions/proxy/config.yaml create mode 100644 e2e/definitions/proxy/deployment.yaml create mode 100644 e2e/definitions/proxy/service.yaml create mode 100644 e2e/definitions/restore/restore-mtls.yaml create mode 100644 e2e/definitions/restore/restore-tls.yaml create mode 100644 e2e/definitions/restore/s3-mtls-restore-mtls.yaml create mode 100644 e2e/definitions/restore/s3-mtls-restore-tls.yaml create mode 100644 e2e/definitions/restore/s3-tls-restore-mtls.yaml create mode 100644 e2e/definitions/restore/s3-tls-restore-tls.yaml create mode 100644 e2e/definitions/subject-dl/deployment.yaml diff --git a/e2e/definitions/annotated-subject/deployment.yaml b/e2e/definitions/annotated-subject/deployment.yaml index 32ec3f0b9..3e2943c4b 100644 --- a/e2e/definitions/annotated-subject/deployment.yaml +++ b/e2e/definitions/annotated-subject/deployment.yaml @@ -20,6 +20,7 @@ spec: spec: containers: - image: busybox + imagePullPolicy: IfNotPresent name: dummy-container-blocking-first-position command: - "/bin/sh" diff --git a/e2e/definitions/annotated-subject/pod.yaml b/e2e/definitions/annotated-subject/pod.yaml index 5e7742192..b64cd36f6 100644 --- a/e2e/definitions/annotated-subject/pod.yaml +++ b/e2e/definitions/annotated-subject/pod.yaml @@ -10,6 
+10,7 @@ metadata: spec: containers: - image: busybox + imagePullPolicy: IfNotPresent name: dummy-container-blocking-first-position command: - "/bin/sh" diff --git a/e2e/definitions/archive/s3-mtls-archive-mtls.yaml b/e2e/definitions/archive/s3-mtls-archive-mtls.yaml new file mode 100644 index 000000000..4ef5d7d28 --- /dev/null +++ b/e2e/definitions/archive/s3-mtls-archive-mtls.yaml @@ -0,0 +1,50 @@ +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: k8up-s3-mtls-archive-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: archive + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/archive/s3-mtls-archive-tls.yaml b/e2e/definitions/archive/s3-mtls-archive-tls.yaml new file mode 100644 index 000000000..7f0fd19cb --- /dev/null +++ b/e2e/definitions/archive/s3-mtls-archive-tls.yaml @@ -0,0 +1,55 @@ +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: k8up-s3-mtls-archive-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: 
/mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: archive + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/archive/s3-tls-archive-mtls.yaml b/e2e/definitions/archive/s3-tls-archive-mtls.yaml new file mode 100644 index 000000000..5520a5cb8 --- /dev/null +++ b/e2e/definitions/archive/s3-tls-archive-mtls.yaml @@ -0,0 +1,55 @@ +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: k8up-s3-tls-archive-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: archive + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup 
+ accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/archive/s3-tls-archive-tls.yaml b/e2e/definitions/archive/s3-tls-archive-tls.yaml new file mode 100644 index 000000000..5c2930019 --- /dev/null +++ b/e2e/definitions/archive/s3-tls-archive-tls.yaml @@ -0,0 +1,46 @@ +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: k8up-s3-tls-archive-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: archive + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 diff --git a/e2e/definitions/backup/backup-mtls.yaml b/e2e/definitions/backup/backup-mtls.yaml new file mode 100644 index 000000000..e949f877a --- /dev/null +++ b/e2e/definitions/backup/backup-mtls.yaml @@ -0,0 +1,36 @@ +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: k8up-backup-mtls + namespace: k8up-e2e-subject +spec: + 
failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/backup/backup-tls.yaml b/e2e/definitions/backup/backup-tls.yaml new file mode 100644 index 000000000..9487dbed2 --- /dev/null +++ b/e2e/definitions/backup/backup-tls.yaml @@ -0,0 +1,34 @@ +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: k8up-backup-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 diff --git a/e2e/definitions/backup/backup.yaml b/e2e/definitions/backup/backup.yaml index 3212ac6bb..781f3c6dd 100644 --- a/e2e/definitions/backup/backup.yaml +++ b/e2e/definitions/backup/backup.yaml @@ -11,7 +11,7 @@ spec: name: backup-repo key: password s3: - endpoint: http://minio.minio.svc.cluster.local:9000 + endpoint: http://minio.minio-e2e.svc.cluster.local:9000 bucket: backup accessKeyIDSecretRef: 
name: backup-credentials diff --git a/e2e/definitions/cert/issure.yaml b/e2e/definitions/cert/issure.yaml new file mode 100644 index 000000000..50117f029 --- /dev/null +++ b/e2e/definitions/cert/issure.yaml @@ -0,0 +1,7 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: minio-e2e +spec: + selfSigned: { } diff --git a/e2e/definitions/cert/minio-ca.yaml b/e2e/definitions/cert/minio-ca.yaml new file mode 100644 index 000000000..3783e35e5 --- /dev/null +++ b/e2e/definitions/cert/minio-ca.yaml @@ -0,0 +1,30 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: minio-root-ca + namespace: minio-e2e +spec: + isCA: true + commonName: minio-root-ca + subject: + organizations: + - Minio + secretName: minio-root-ca + duration: 17520h0m0s + renewBefore: 2190h0m0s + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: Issuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: minio-intermediate-ca + namespace: minio-e2e +spec: + ca: + secretName: minio-root-ca diff --git a/e2e/definitions/cert/minio-mtls.yaml b/e2e/definitions/cert/minio-mtls.yaml new file mode 100644 index 000000000..7a3a13437 --- /dev/null +++ b/e2e/definitions/cert/minio-mtls.yaml @@ -0,0 +1,36 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: minio-server-mtls + namespace: minio-e2e +spec: + isCA: false + secretName: minio-server-mtls + dnsNames: + - minio-mtls.minio-e2e.svc.cluster.local + - minio-mtls.minio-e2e + - minio-mtls + issuerRef: + name: minio-intermediate-ca + duration: 8760h + renewBefore: 2190h + usages: + - server auth + - client auth +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: minio-client-mtls + namespace: minio-e2e +spec: + secretName: minio-client-mtls + isCA: false + duration: 2160h + renewBefore: 720m + usages: + - server auth + - client auth + commonName: "minio-mtls" + issuerRef: + name: 
minio-intermediate-ca diff --git a/e2e/definitions/cert/minio-tls.yaml b/e2e/definitions/cert/minio-tls.yaml new file mode 100644 index 000000000..2de901a30 --- /dev/null +++ b/e2e/definitions/cert/minio-tls.yaml @@ -0,0 +1,18 @@ +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: minio-server-tls + namespace: minio-e2e +spec: + isCA: false + secretName: minio-server-tls + dnsNames: + - minio-tls.minio-e2e.svc.cluster.local + - minio-tls.minio-e2e + - minio-tls + issuerRef: + name: minio-intermediate-ca + duration: 8760h + renewBefore: 2190h + usages: + - server auth diff --git a/e2e/definitions/check/check-mtls.yaml b/e2e/definitions/check/check-mtls.yaml new file mode 100644 index 000000000..a8313251a --- /dev/null +++ b/e2e/definitions/check/check-mtls.yaml @@ -0,0 +1,36 @@ +apiVersion: k8up.io/v1 +kind: Check +metadata: + name: k8up-check-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/check/check-tls.yaml b/e2e/definitions/check/check-tls.yaml new file mode 100644 index 000000000..2e27ec3e1 --- /dev/null +++ b/e2e/definitions/check/check-tls.yaml @@ -0,0 +1,34 @@ +apiVersion: k8up.io/v1 +kind: Check +metadata: + name: k8up-check-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + 
repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 diff --git a/e2e/definitions/proxy/config.yaml b/e2e/definitions/proxy/config.yaml new file mode 100644 index 000000000..fcc7f268d --- /dev/null +++ b/e2e/definitions/proxy/config.yaml @@ -0,0 +1,89 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-conf + namespace: minio-e2e +data: + nginx.conf: | + user nginx; + worker_processes 1; + error_log /var/log/nginx/error.log warn; + pid /var/run/nginx.pid; + events { + worker_connections 1024; + } + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /var/log/nginx/access.log main; + sendfile on; + keepalive_timeout 65; + + server { + server_name minio-tls minio-tls.minio-e2e minio-tls.minio-e2e.svc.cluster.local; + listen 443 ssl; + ssl_certificate /mnt/tls/tls.crt; + ssl_certificate_key /mnt/tls/tls.key; + ssl_client_certificate /mnt/tls/ca.crt; + + # Allow special characters in headers + ignore_invalid_headers off; + # Allow any size file to be uploaded. 
+ # Set to a value such as 1000m; to restrict file size to a specific value + client_max_body_size 0; + # Disable buffering + proxy_buffering off; + proxy_request_buffering off; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://minio.minio-e2e.svc.cluster.local:9000; + } + } + + server { + server_name minio-mtls minio-mtls.minio-e2e minio-mtls.minio-e2e.svc.cluster.local; + listen 443 ssl; + ssl_certificate /mnt/mtls/tls.crt; + ssl_certificate_key /mnt/mtls/tls.key; + ssl_client_certificate /mnt/mtls/ca.crt; + ssl_verify_client on; + + # Allow special characters in headers + ignore_invalid_headers off; + # Allow any size file to be uploaded. 
+ # Set to a value such as 1000m; to restrict file size to a specific value + client_max_body_size 0; + # Disable buffering + proxy_buffering off; + proxy_request_buffering off; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://minio.minio-e2e.svc.cluster.local:9000; + } + } + } diff --git a/e2e/definitions/proxy/deployment.yaml b/e2e/definitions/proxy/deployment.yaml new file mode 100644 index 000000000..b450f0c74 --- /dev/null +++ b/e2e/definitions/proxy/deployment.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: minio-e2e + labels: + app: nginx +spec: + selector: + matchLabels: + app: nginx + replicas: 1 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.24.0 + ports: + - containerPort: 80 + - containerPort: 443 + volumeMounts: + - name: nginx-config + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: minio-tls + mountPath: /mnt/tls/ + - name: minio-mtls + mountPath: /mnt/mtls/ + volumes: + - name: nginx-config + configMap: + name: nginx-conf + - name: minio-tls + secret: + secretName: minio-server-tls + defaultMode: 420 + - name: minio-mtls + secret: + secretName: minio-server-mtls + defaultMode: 420 diff --git a/e2e/definitions/proxy/service.yaml b/e2e/definitions/proxy/service.yaml new file mode 100644 index 000000000..83cf9c5f2 --- /dev/null +++ b/e2e/definitions/proxy/service.yaml @@ -0,0 +1,35 @@ +kind: Service +apiVersion: v1 +metadata: + name: minio-tls + namespace: minio-e2e +spec: + selector: + app: nginx + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: nginx-tls-80 + - 
protocol: TCP + port: 443 + targetPort: 443 + name: nginx-tls-443 +--- +kind: Service +apiVersion: v1 +metadata: + name: minio-mtls + namespace: minio-e2e +spec: + selector: + app: nginx + ports: + - protocol: TCP + port: 80 + targetPort: 80 + name: nginx-mtls-80 + - protocol: TCP + port: 443 + targetPort: 443 + name: nginx-mtls-443 diff --git a/e2e/definitions/restore/restore-backupcommand.yaml b/e2e/definitions/restore/restore-backupcommand.yaml index b4c85c042..8591b20b2 100644 --- a/e2e/definitions/restore/restore-backupcommand.yaml +++ b/e2e/definitions/restore/restore-backupcommand.yaml @@ -15,7 +15,7 @@ spec: name: backup-repo key: password s3: - endpoint: http://minio.minio.svc.cluster.local:9000 + endpoint: http://minio.minio-e2e.svc.cluster.local:9000 bucket: backup accessKeyIDSecretRef: name: backup-credentials diff --git a/e2e/definitions/restore/restore-mtls.yaml b/e2e/definitions/restore/restore-mtls.yaml new file mode 100644 index 000000000..6f89631a5 --- /dev/null +++ b/e2e/definitions/restore/restore-mtls.yaml @@ -0,0 +1,39 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-restore-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + folder: + claimName: subject-pvc + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/restore/restore-tls.yaml b/e2e/definitions/restore/restore-tls.yaml new 
file mode 100644 index 000000000..c19824413 --- /dev/null +++ b/e2e/definitions/restore/restore-tls.yaml @@ -0,0 +1,37 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-restore-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + folder: + claimName: subject-pvc + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 diff --git a/e2e/definitions/restore/restore.yaml b/e2e/definitions/restore/restore.yaml index 05f5125f0..a8c60be44 100644 --- a/e2e/definitions/restore/restore.yaml +++ b/e2e/definitions/restore/restore.yaml @@ -14,7 +14,7 @@ spec: name: backup-repo key: password s3: - endpoint: http://minio.minio.svc.cluster.local:9000 + endpoint: http://minio.minio-e2e.svc.cluster.local:9000 bucket: backup accessKeyIDSecretRef: name: backup-credentials diff --git a/e2e/definitions/restore/s3-mtls-restore-mtls.yaml b/e2e/definitions/restore/s3-mtls-restore-mtls.yaml new file mode 100644 index 000000000..3c3afe4ec --- /dev/null +++ b/e2e/definitions/restore/s3-mtls-restore-mtls.yaml @@ -0,0 +1,50 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-s3-mtls-restore-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: restore + accessKeyIDSecretRef: + name: 
backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/restore/s3-mtls-restore-tls.yaml b/e2e/definitions/restore/s3-mtls-restore-tls.yaml new file mode 100644 index 000000000..e5800187c --- /dev/null +++ b/e2e/definitions/restore/s3-mtls-restore-tls.yaml @@ -0,0 +1,55 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-s3-mtls-restore-tls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: restore + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: 
+ - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/restore/s3-tls-restore-mtls.yaml b/e2e/definitions/restore/s3-tls-restore-mtls.yaml new file mode 100644 index 000000000..368eaffd9 --- /dev/null +++ b/e2e/definitions/restore/s3-tls-restore-mtls.yaml @@ -0,0 +1,55 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-s3-tls-restore-mtls + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: restore + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/restore/s3-tls-restore-tls.yaml b/e2e/definitions/restore/s3-tls-restore-tls.yaml new file mode 100644 index 000000000..71bc61223 --- /dev/null +++ b/e2e/definitions/restore/s3-tls-restore-tls.yaml @@ -0,0 +1,46 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-s3-tls-restore-tls + namespace: k8up-e2e-subject +spec: + 
failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: restore + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + options: + caCert: /mnt/ca/ca.crt + s3: + endpoint: https://minio-tls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-ca-tls + mountPath: /mnt/ca/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-ca-tls + secret: + secretName: minio-ca-tls + defaultMode: 420 diff --git a/e2e/definitions/subject-dl/deployment.yaml b/e2e/definitions/subject-dl/deployment.yaml new file mode 100644 index 000000000..6da061cc2 --- /dev/null +++ b/e2e/definitions/subject-dl/deployment.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: subject-dl-deployment + namespace: k8up-e2e-subject +spec: + replicas: 1 + selector: + matchLabels: + app: subject-dl + template: + metadata: + labels: + app: subject-dl + spec: + initContainers: + - name: restic-container + image: ghcr.io/k8up-io/k8up:latest + imagePullPolicy: IfNotPresent + command: + - /bin/bash + - -c + - restic ls latest --json | awk '{ print; exit }' | awk '{print ("backup-" gensub(/.*"hostname"\s*:\s*"([^".]+)".*/, "\\1", "g") "-" gensub(/.*"paths"\s*:\[\s*"([^"]+\/)?([^".]+)".*\].*/, "\\2", "g") "-" gensub(/.*"time"\s*:\s*"([^".]+)(.[0-9]+)?Z".*/, "\\1Z", "g") ".tar.gz")}' > /mnt/share/filename.txt + env: + - name: RESTIC_REPOSITORY + value: s3:http://minio.minio-e2e.svc.cluster.local:9000/backup + - name: RESTIC_PASSWORD + valueFrom: + secretKeyRef: + name: backup-repo + key: password + - name: 
AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: backup-credentials + key: username + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: backup-credentials + key: password + securityContext: + runAsUser: $ID + volumeMounts: + - name: share + mountPath: /mnt/share + - name: minio-mc-container + image: minio/mc + imagePullPolicy: IfNotPresent + command: + - bash + - -c + - | + mc alias set minio $MINIO_HOST $MINIO_USERNAME $MINIO_PASSWORD + mc cp minio/$MINIO_BUCKET/$(cat /mnt/share/filename.txt) /data/ + env: + - name: MC_CONFIG_DIR + value: /conf/.mc + - name: MINIO_HOST + value: http://minio.minio-e2e.svc.cluster.local:9000 + - name: MINIO_BUCKET + value: restore + - name: MINIO_USERNAME + valueFrom: + secretKeyRef: + name: backup-credentials + key: username + - name: MINIO_PASSWORD + valueFrom: + secretKeyRef: + name: backup-credentials + key: password + securityContext: + runAsUser: $ID + volumeMounts: + - name: volume + mountPath: /data + - name: share + mountPath: /mnt/share + - name: conf + mountPath: /conf + containers: + - name: subject-container + image: quay.io/prometheus/busybox:latest + imagePullPolicy: IfNotPresent + args: + - sh + - -c + - | + tar -xzf *.tar.gz --strip-components=2 + sleep infinity + workingDir: /data/ + securityContext: + runAsUser: 1000 + volumeMounts: + - name: volume + mountPath: /data + volumes: + - name: volume + emptyDir: { } + - name: share + emptyDir: { } + - name: conf + emptyDir: { } From f391110e89f81be567e33a698d25379033945317 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:04:44 +0330 Subject: [PATCH 11/38] [ADD] Add some fucntions for checking e2e test These fucntions add: - Adding "mc" function for using minio client for using download files, remove buckets, get list of files - Adding "given_a_clean_archive" function for clear archive bucket - Adding "given_a_subject_dl" function for apply deployment for checking last backup when restore in S3 - Adding "give_self_signed_issuer" 
function for create self-signed issuer - Adding "expect_dl_file_in_container" function for checking is last backup was uploaded in S3 is okay Also fix some bugs: - Fixing empty output when get last dump of snapshot - becuase of syncing and storing file in disk, fetching last dump is took and the output of "run restic dump latest" is empty - Adding sleep before running restic and mc Signed-off-by: poyaz --- e2e/lib/k8up.bash | 119 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 106 insertions(+), 13 deletions(-) diff --git a/e2e/lib/k8up.bash b/e2e/lib/k8up.bash index 55ea298cf..eddaf4a95 100755 --- a/e2e/lib/k8up.bash +++ b/e2e/lib/k8up.bash @@ -1,6 +1,6 @@ #!/bin/bash -export MINIO_NAMESPACE=${MINIO_NAMESPACE-minio} +export MINIO_NAMESPACE=${MINIO_NAMESPACE:-minio-e2e} directory=$(dirname "${BASH_SOURCE[0]}") source "$directory/detik.bash" @@ -53,6 +53,7 @@ clear_pv_data() { } restic() { + sleep 3 kubectl run "restic-$(timestamp)" \ --attach \ --restart Never \ @@ -66,11 +67,27 @@ restic() { --command -- \ restic \ --no-cache \ - --repo "s3:http://minio.minio.svc.cluster.local:9000/backup" \ + --repo "s3:http://minio.${MINIO_NAMESPACE}.svc.cluster.local:9000/backup" \ "${@}" \ --json } +mc() { + sleep 3 + kubectl run "minio-mc-$(timestamp)" \ + --attach \ + --restart Never \ + --namespace "${DETIK_CLIENT_NAMESPACE-"k8up-system"}" \ + --image "minio/mc" \ + --image-pull-policy "IfNotPresent" \ + --env "MC_HOST_minio=http://minioadmin:minioadmin@minio.minio-e2e.svc.cluster.local:9000" \ + --pod-running-timeout 60s \ + --quiet \ + --command -- \ + mc \ + "${@}" +} + replace_in_file() { require_args 3 ${#} @@ -95,7 +112,6 @@ prepare() { mkdir -p "${target_dir}" cp -r "definitions" "debug/definitions" - replace_in_file "${target_file}" E2E_IMAGE "'${E2E_IMAGE}'" replace_in_file "${target_file}" ID "$(id -u)" replace_in_file "${target_file}" BACKUP_FILE_NAME "${BACKUP_FILE_NAME}" @@ -110,6 +126,15 @@ given_a_clean_ns() { echo "✅ The namespace 
'${DETIK_CLIENT_NAMESPACE}' is ready." } +given_a_clean_archive() { + require_args 1 ${#} + + bucket=${1} + + run mc mb "minio/${bucket}" --ignore-existing + run mc rm --recursive --force "minio/${bucket}" +} + given_a_subject() { require_args 2 ${#} @@ -122,6 +147,12 @@ given_a_subject() { echo "✅ The subject is ready" } +given_a_subject_dl() { + yq e '.spec.template.spec.initContainers[0].image="'${E2E_IMAGE}'" | .spec.template.spec.initContainers[0].securityContext.runAsUser='$(id -u)' | .spec.template.spec.initContainers[1].securityContext.runAsUser='$(id -u)' | .spec.template.spec.containers[0].securityContext.runAsUser='$(id -u)'' definitions/subject-dl/deployment.yaml | kubectl apply -f - + + echo "✅ The subject download is ready" +} + given_an_annotated_subject() { require_args 2 ${#} @@ -135,14 +166,14 @@ given_an_annotated_subject() { } given_an_annotated_subject_pod() { - require_args 2 ${#} + require_args 2 ${#} - export BACKUP_FILE_NAME=${1} - export BACKUP_FILE_CONTENT=${2} + export BACKUP_FILE_NAME=${1} + export BACKUP_FILE_CONTENT=${2} - yq e '.spec.containers[1].securityContext.runAsUser='$(id -u)' | .spec.containers[1].env[0].value=strenv(BACKUP_FILE_CONTENT) | .spec.containers[1].env[1].value=strenv(BACKUP_FILE_NAME)' definitions/annotated-subject/pod.yaml | kubectl apply -f - + yq e '.spec.containers[1].securityContext.runAsUser='$(id -u)' | .spec.containers[1].env[0].value=strenv(BACKUP_FILE_CONTENT) | .spec.containers[1].env[1].value=strenv(BACKUP_FILE_NAME)' definitions/annotated-subject/pod.yaml | kubectl apply -f - - echo "✅ The annotated subject pod is ready" + echo "✅ The annotated subject pod is ready" } given_a_rwo_pvc_subject_in_worker_node() { @@ -169,7 +200,7 @@ given_a_rwo_pvc_subject_in_controlplane_node() { given_s3_storage() { # Speed this step up - (helm -n "${MINIO_NAMESPACE}" list | grep minio > /dev/null) && return + (helm -n "${MINIO_NAMESPACE}" list | grep minio >/dev/null) && return helm repo add minio 
https://charts.min.io/ --force-update helm repo update helm upgrade --install minio \ @@ -181,15 +212,37 @@ given_s3_storage() { echo "✅ S3 Storage is ready" } +give_self_signed_issuer() { + ns=${NAMESPACE=${DETIK_CLIENT_NAMESPACE}} + + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.0/cert-manager.yaml + + kubectl wait -n cert-manager --for=condition=Available deployment/cert-manager-webhook --timeout=120s + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/issure.yaml | kubectl apply -f - + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/minio-ca.yaml | kubectl apply -f - + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/minio-tls.yaml | kubectl apply -f - + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/minio-mtls.yaml | kubectl apply -f - + + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/proxy/config.yaml | kubectl apply -f - + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/proxy/deployment.yaml | kubectl apply -f - + yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/proxy/service.yaml | kubectl apply -f - + + kubectl wait -n "${MINIO_NAMESPACE}" --for=condition=Ready certificates/minio-server-tls --timeout=60s + kubectl get secret -n "${MINIO_NAMESPACE}" minio-server-tls -o yaml | yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${DETIK_CLIENT_NAMESPACE}\"' | .metadata.name="minio-ca-tls" | del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.annotations) | del(.data."tls.crt") | del(.data."tls.key") | del(.type)' | kubectl apply -f - + + kubectl wait -n "${MINIO_NAMESPACE}" --for=condition=Ready 
certificates/minio-client-mtls --timeout=60s + kubectl get secret -n "${MINIO_NAMESPACE}" minio-client-mtls -o yaml | yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${DETIK_CLIENT_NAMESPACE}\"' | del(.metadata.uid) | del(.metadata.resourceVersion) | del(.metadata.annotations)' | kubectl apply -f - +} + given_a_clean_s3_storage() { # uninstalling an then installing the helmchart unfortunatelly hangs ong GH actions given_s3_storage - kubectl -n "${MINIO_NAMESPACE}" scale deployment minio --replicas 0 + kubectl -n "${MINIO_NAMESPACE}" scale deployment minio --replicas 0 kubectl -n "${MINIO_NAMESPACE}" delete pvc minio yq e '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/minio/pvc.yaml | kubectl apply -f - - kubectl -n "${MINIO_NAMESPACE}" scale deployment minio --replicas 1 + kubectl -n "${MINIO_NAMESPACE}" scale deployment minio --replicas 1 echo "✅ S3 Storage cleaned" } @@ -230,7 +283,14 @@ given_an_existing_backup() { wait_until backup/k8up-backup completed verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Backup named 'k8up-backup'" - run restic dump latest "/data/subject-pvc/${backup_file_name}" + for i in {1..3}; do + run restic dump latest "/data/subject-pvc/${backup_file_name}" + if [ ! -z "${output}" ]; then + break + fi + sleep 3 + done + # shellcheck disable=SC2154 [ "${backup_file_content}" = "${output}" ] @@ -362,7 +422,7 @@ wait_until() { ns=${NAMESPACE=${DETIK_CLIENT_NAMESPACE}} echo "Waiting for '${object}' in namespace '${ns}' to become '${condition}' ..." 
- kubectl -n "${ns}" wait --timeout 2m --for "condition=${condition}" "${object}" + kubectl -n "${ns}" wait --timeout 5m --for "condition=${condition}" "${object}" } expect_file_in_container() { @@ -395,6 +455,39 @@ expect_file_in_container() { done } +expect_dl_file_in_container() { + require_args 4 ${#} + + given_a_subject_dl + wait_until deployment/subject-dl-deployment available + + local pod container expected_file expected_content + pod=${1} + container=${2} + expected_file=${3} + expected_content=${4} + + commands=( + "ls -la \"$(dirname "${expected_file}")\"" + "test -f \"${expected_file}\"" + "cat \"${expected_file}\"" + "test \"${expected_content}\" \"=\" \"\$(cat \"${expected_file}\")\" " + ) + + echo "Testing if file '${expected_file}' contains '${expected_content}' in container '${container}' of pod '${pod}':" + + for cmd in "${commands[@]}"; do + echo "> by running the command \`sh -c '${cmd}'\`." + kubectl exec \ + "${pod}" \ + --container "${container}" \ + --stdin \ + --namespace "${DETIK_CLIENT_NAMESPACE}" \ + -- sh -c "${cmd}" + echo '↩' + done +} + get_latest_snap() { ns=${NAMESPACE=${DETIK_CLIENT_NAMESPACE}} From 5270d547ff6d91fd9255ab353a28783304cede15 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:05:02 +0330 Subject: [PATCH 12/38] [ADD] Adding new e2e test for supporting self-signed issuer This test contains below sections: - Testing backup API for TLS and mTLS mode - Testing restore API in pvc for TLS and mTLS mode - Testing restore API in S3 for TLS and mTLS mode - Testing archive API in S3 for TLS and mTLS mode - Testin check API for TLS and mTLS mode Signed-off-by: poyaz --- e2e/test-10-self-signed-tls.bats | 456 +++++++++++++++++++++++++++++++ 1 file changed, 456 insertions(+) create mode 100644 e2e/test-10-self-signed-tls.bats diff --git a/e2e/test-10-self-signed-tls.bats b/e2e/test-10-self-signed-tls.bats new file mode 100644 index 000000000..6fa1b5f4e --- /dev/null +++ b/e2e/test-10-self-signed-tls.bats @@ -0,0 +1,456 
@@ +#!/usr/bin/env bats + +load "lib/utils" +load "lib/detik" +load "lib/k8up" + +# shellcheck disable=SC2034 +DETIK_CLIENT_NAME="kubectl" +# shellcheck disable=SC2034 +DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" +# shellcheck disable=SC2034 +DEBUG_DETIK="true" + +### Start backup section + +@test "Given a PVC, When creating a Backup (TLS) of an app, Then expect Restic repository - using self-signed issuer" { + expected_content="expected content for tls: $(timestamp)" + expected_filename="expected_filename.txt" + + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_a_subject "${expected_filename}" "${expected_content}" + + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-tls.yaml | kubectl apply -f - + + try "at most 10 times every 5s to get backup named 'k8up-backup-tls' and verify that '.status.started' is 'true'" + verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-tls' '.status.active' 1 true + + wait_until backup/k8up-backup-tls completed + + run restic snapshots + + echo "---BEGIN restic snapshots output---" + echo "${output}" + echo "---END---" + + echo -n "Number of Snapshots >= 1? 
" + jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created + + run get_latest_snap + + run restic dump "${output}" "/data/subject-pvc/${expected_filename}" + + echo "---BEGIN actual ${expected_filename}---" + echo "${output}" + echo "---END---" + + [ "${output}" = "${expected_content}" ] +} + +@test "Given a PVC, When creating a Backup (mTLS) of an app, Then expect Restic repository - using self-signed issuer" { + expected_content="expected content for mtls: $(timestamp)" + expected_filename="expected_filename.txt" + + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_a_subject "${expected_filename}" "${expected_content}" + + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 5s to get backup named 'k8up-backup-mtls' and verify that '.status.started' is 'true'" + verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-mtls' '.status.active' 1 true + + wait_until backup/k8up-backup-mtls completed + + run restic snapshots + + echo "---BEGIN restic snapshots output---" + echo "${output}" + echo "---END---" + + echo -n "Number of Snapshots >= 1? 
" + jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created + + run get_latest_snap + + run restic dump "${output}" "/data/subject-pvc/${expected_filename}" + + echo "---BEGIN actual ${expected_filename}---" + echo "${output}" + echo "---END---" + + [ "${output}" = "${expected_content}" ] +} + +### End backup section + +### Start restore to pvc section + +@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to PVC - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Delete and create new subject + new_content="New content for tls: $(timestamp)" + new_filename="new_file.txt" + given_a_clean_ns + give_self_signed_issuer + given_a_subject "${new_filename}" "${new_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/restore-tls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-restore-tls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-restore-tls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-restore-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-restore-tls'" + + expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" + expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${new_filename}" "${new_content}" +} + +@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to PVC - using self-signed issuer" { + # Backup + 
expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Delete and create new subject + new_content="New content for mtls: $(timestamp)" + new_filename="new_file.txt" + given_a_clean_ns + give_self_signed_issuer + given_a_subject "${new_filename}" "${new_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/restore-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-restore-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-restore-mtls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-restore-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-restore-mtls'" + + expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" + expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${new_filename}" "${new_content}" +} + +### End restore to pvc section + +### Start restore to s3 section + +@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to S3 (TLS) - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-tls-restore-tls.yaml | 
kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-tls-restore-tls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-tls-restore-tls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-tls-restore-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-tls-restore-tls'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} + +@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (TLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-tls-restore-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-tls-restore-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-tls-restore-mtls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-tls-restore-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-tls-restore-mtls'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} + +@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + 
expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-tls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-tls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-tls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-mtls-restore-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-tls'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} + +@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-mtls-restore-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for 
Restore named 'k8up-s3-mtls-restore-mtls'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} + +### End restore to s3 section + +### Start archive to s3 section + +@test "Given an existing Restic repository, When creating a Archive (TLS), Then Restore to S3 (TLS) - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-tls-archive-tls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-tls-archive-tls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-tls-archive-tls' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-tls-archive-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-tls-archive-tls'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo "---BEGIN total archives output---" + total_archives=$(echo -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} + +@test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (TLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + 
given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-tls-archive-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-tls-archive-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-tls-archive-mtls' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-tls-archive-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-tls-archive-mtls'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo "---BEGIN total archives output---" + total_archives=$(echo -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} + +@test "Given an existing Restic repository, When creating a Archive (TLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-tls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-tls' and verify that '.status.started' is 'true'" + try 
"at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-tls' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-mtls-archive-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-tls'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo "---BEGIN total archives output---" + total_archives=$(echo -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} + +@test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-mtls-archive-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo 
"---BEGIN total archives output---" + total_archives=$(echo -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} + +### End archive to s3 section + +### Start check section + +@test "Given a PVC, When creating a Check (TLS) of an app, Then expect Restic repository - using self-signed issuer" { + # Backup + expected_content="Old content for tls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Check + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-tls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Check named 'k8up-check-tls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-check-tls' and verify that '.status.active' is '1'" + + wait_until check/k8up-check-tls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-tls'" +} + +@test "Given a PVC, When creating a Check (mTLS) of an app, Then expect Restic repository - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Check + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Check named 'k8up-check-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-check-mtls' 
and verify that '.status.active' is '1'" + + wait_until check/k8up-check-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-mtls'" +} + +### End check section From a9cf8fd5d108cb4844f2bcdf86b0bc70d6caded3 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 02:08:07 +0330 Subject: [PATCH 13/38] [FIX] Fixing problem in attach mode when a failure happened in the pod Signed-off-by: poyaz --- e2e/lib/k8up.bash | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/e2e/lib/k8up.bash b/e2e/lib/k8up.bash index 32fda105e..f80b59d7b 100755 --- a/e2e/lib/k8up.bash +++ b/e2e/lib/k8up.bash @@ -79,8 +79,8 @@ restic() { mc() { sleep 3 - kubectl run "minio-mc-$(timestamp)" \ - --attach \ + podname="minio-mc-$(timestamp)" + kubectl run "$podname" \ --restart Never \ --namespace "${DETIK_CLIENT_NAMESPACE-"k8up-system"}" \ --image "minio/mc" \ @@ -91,6 +91,8 @@ mc() { --command -- \ mc \ "${@}" + kubectl wait --for jsonpath='{.status.phase}'=Succeeded pod "$podname" -n "${DETIK_CLIENT_NAMESPACE-"k8up-system"}" --timeout=2m > /dev/null + kubectl -n "${DETIK_CLIENT_NAMESPACE-"k8up-system"}" logs "$podname" } replace_in_file() { From 11f0945c0805ffde4df6d3d09f9a74e8b45ea80a Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 13:09:48 +0330 Subject: [PATCH 14/38] [ADD] Adding RESTORE_CA_CERT_FILE, RESTORE_CLIENT_CERT_FILE, RESTORE_CLIENT_KEY_FILE env instead of filling TLS and mTLS options in restore method Signed-off-by: poyaz --- cmd/restic/main.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index fcdfd2533..2f7674617 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -20,12 +20,14 @@ import ( ) const ( - backupDirEnvKey = "BACKUP_DIR" - restoreDirEnvKey = "RESTORE_DIR" - caCertFileEnvKey = "CA_CERT_FILE" - clientCertFileEnvKey = "CLIENT_CERT_FILE" - clientKeyFileEnvKey = "CLIENT_KEY_FILE" - 
workDirEnvKey = "WORK_DIR" + backupDirEnvKey = "BACKUP_DIR" + restoreDirEnvKey = "RESTORE_DIR" + caCertFileEnvKey = "CA_CERT_FILE" + clientCertFileEnvKey = "CLIENT_CERT_FILE" + clientKeyFileEnvKey = "CLIENT_KEY_FILE" + restoreCaCertFileEnvKey = "RESTORE_CA_CERT_FILE" + restoreClientCertFileEnvKey = "RESTORE_CLIENT_CERT_FILE" + restoreClientKeyFileEnvKey = "RESTORE_CLIENT_KEY_FILE" restoreTypeArg = "restoreType" restoreS3EndpointArg = "restoreS3Endpoint" @@ -67,9 +69,9 @@ var ( &cli.StringFlag{Destination: &cfg.Config.RestoreS3AccessKey, Name: restoreS3AccessKeyIDArg, EnvVars: []string{"RESTORE_ACCESSKEYID"}, Usage: "S3 access key used to connect to the S3 endpoint when restoring"}, &cli.StringFlag{Destination: &cfg.Config.RestoreS3SecretKey, Name: restoreS3SecretAccessKeyArg, EnvVars: []string{"RESTORE_SECRETACCESSKEY"}, Usage: "S3 secret key used to connect to the S3 endpoint when restoring"}, &cli.StringFlag{Destination: &cfg.Config.RestoreS3Endpoint, Name: restoreS3EndpointArg, EnvVars: []string{"RESTORE_S3ENDPOINT"}, Usage: "S3 endpoint to connect to when restoring, e.g. 
'https://minio.svc:9000/backup"}, - &cli.PathFlag{Destination: &cfg.Config.RestoreCACert, Name: "restoreCaCert", Usage: "The certificate authority file path using for restore (If isn't filled, using caCert)"}, - &cli.PathFlag{Destination: &cfg.Config.RestoreClientCert, Name: "restoreClientCert", Usage: "The client certificate file path using for restore (If isn't filled, using clientCert)"}, - &cli.PathFlag{Destination: &cfg.Config.RestoreClientKey, Name: "restoreClientKey", Usage: "The client private key file path using for restore (If isn't filled, using clientKey)"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreCACert, Name: "restoreCaCert", EnvVars: []string{restoreCaCertFileEnvKey}, Usage: "The certificate authority file path using for restore"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreClientCert, Name: "restoreClientCert", EnvVars: []string{restoreClientCertFileEnvKey}, Usage: "The client certificate file path using for restore"}, + &cli.PathFlag{Destination: &cfg.Config.RestoreClientKey, Name: "restoreClientKey", EnvVars: []string{restoreClientKeyFileEnvKey}, Usage: "The client private key file path using for restore"}, &cli.BoolFlag{Destination: &cfg.Config.VerifyRestore, Name: "verifyRestore", Usage: "If the restore should get verified, only for PVCs restore"}, &cli.BoolFlag{Destination: &cfg.Config.RestoreTrimPath, Name: "trimRestorePath", EnvVars: []string{"TRIM_RESTOREPATH"}, Value: true, DefaultText: "enabled", Usage: "If set, strips the value of --restoreDir from the lefts side of the remote restore path value"}, From 800b8190ac5aaba780d5773c230c62ce8482f1a0 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 13:10:27 +0330 Subject: [PATCH 15/38] [UPDATE] Update operator and restic cli help according to new values is added Signed-off-by: poyaz --- docs/modules/ROOT/examples/usage/operator.txt | 1 + docs/modules/ROOT/examples/usage/restic.txt | 7 +++++++ 2 files changed, 8 insertions(+) diff --git 
a/docs/modules/ROOT/examples/usage/operator.txt b/docs/modules/ROOT/examples/usage/operator.txt index 928e124fd..2cb914614 100644 --- a/docs/modules/ROOT/examples/usage/operator.txt +++ b/docs/modules/ROOT/examples/usage/operator.txt @@ -46,4 +46,5 @@ OPTIONS: --skip-pvcs-without-annotation skip selecting PVCs that don't have the BACKUP_ANNOTATION (default: disabled) [$BACKUP_SKIP_WITHOUT_ANNOTATION] --checkschedule value the default check schedule (default: "0 0 * * 0") [$BACKUP_CHECKSCHEDULE] --operator-namespace value set the namespace in which the K8up operator itself runs [$BACKUP_OPERATOR_NAMESPACE] + --vardir value the var data dir for read/write k8up data or temp file in pod (default: /k8up) [$VAR_DIR] --help, -h show help (default: false) diff --git a/docs/modules/ROOT/examples/usage/restic.txt b/docs/modules/ROOT/examples/usage/restic.txt index db955c9c3..6127aff2d 100644 --- a/docs/modules/ROOT/examples/usage/restic.txt +++ b/docs/modules/ROOT/examples/usage/restic.txt @@ -27,6 +27,9 @@ OPTIONS: --restoreS3AccessKey value S3 access key used to connect to the S3 endpoint when restoring [$RESTORE_ACCESSKEYID] --restoreS3SecretKey value S3 secret key used to connect to the S3 endpoint when restoring [$RESTORE_SECRETACCESSKEY] --restoreS3Endpoint value S3 endpoint to connect to when restoring, e.g. 'https://minio.svc:9000/backup [$RESTORE_S3ENDPOINT] + --restoreCaCert value The certificate authority file path using for restore [$RESTORE_CA_CERT_FILE] + --restoreClientCert value The client certificate file path using for restore [$RESTORE_CLIENT_CERT_FILE] + --restoreClientKey value The client private key file path using for restore [$RESTORE_CLIENT_KEY_FILE] --verifyRestore If the restore should get verified, only for PVCs restore (default: false) --trimRestorePath If set, strips the value of --restoreDir from the lefts side of the remote restore path value (default: enabled) [$TRIM_RESTOREPATH] --resticBin value The path to the restic binary. 
(default: "/usr/local/bin/restic") [$RESTIC_BINARY] @@ -47,4 +50,8 @@ OPTIONS: --keepWithin value While pruning, keep tagged snapshots within the given duration, e.g. '2y5m7d3h' [$KEEP_WITHIN] --targetPods value [ --targetPods value ] Filter list of pods by TARGET_PODS names [$TARGET_PODS] --sleepDuration value Sleep for specified amount until init starts (default: 0s) [$SLEEP_DURATION] + --varDir value The var directory is stored k8up metadata files and temporary files (default: /k8up) + --caCert value The certificate authority file path [$CA_CERT_FILE] + --clientCert value The client certificate file path [$CLIENT_CERT_FILE] + --clientKey value The client private key file path [$CLIENT_KEY_FILE] --help, -h show help (default: false) From fe51211404d855339fafa37e528d391638c0765b Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 13:12:03 +0330 Subject: [PATCH 16/38] [FIX] Removing unnecessary snipped tag (tag: ) Signed-off-by: poyaz --- .../ROOT/pages/how-tos/application-aware-backups.adoc | 8 -------- 1 file changed, 8 deletions(-) diff --git a/docs/modules/ROOT/pages/how-tos/application-aware-backups.adoc b/docs/modules/ROOT/pages/how-tos/application-aware-backups.adoc index 12ac23c33..e6a8fec82 100644 --- a/docs/modules/ROOT/pages/how-tos/application-aware-backups.adoc +++ b/docs/modules/ROOT/pages/how-tos/application-aware-backups.adoc @@ -7,14 +7,12 @@ Define an annotation on a Pod: [source,yaml] ---- - template: metadata: labels: app: mariadb annotations: k8up.io/backupcommand: mysqldump -uroot -psecure --all-databases - ---- With this annotation the Operator will trigger that command inside the the container and capture the stdout to a backup. @@ -39,7 +37,6 @@ The parameter `-- clean` ensures all tables including data are purged before, so [source,yaml] ---- - template: metadata: labels: @@ -52,7 +49,6 @@ template: - name: postgres image: docker.io/bitnami/postgresql:11 ... 
- ---- == MongoDB @@ -62,7 +58,6 @@ It's able to dump all the database contents into a https://www.mongodb.com/blog/ [source,yaml] ---- - template: metadata: labels: @@ -75,7 +70,6 @@ template: - name: mongodb image: quay.io/bitnami/mongodb:4.4.11-debian-10-r12 ... - ---- The proprietary binary archive can only be read by the https://www.mongodb.com/try/download/database-tools[MongoDB Database Tools]. @@ -106,7 +100,6 @@ Using `k8up.io/backupcommand-container` annotation You can specify the container [source,yaml] ---- - template: metadata: labels: @@ -122,5 +115,4 @@ template: - name: postgres - name: prometheus-exporter ... - ---- From b2b83e1a85cb4b5ef186d837d7d6ee7d25a4e94a Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 13:12:30 +0330 Subject: [PATCH 17/38] [ADD] Adding document about how to use TLS and mTls in api refrence Signed-off-by: poyaz --- docs/modules/ROOT/pages/how-tos/archive.adoc | 670 ++++++++++++++++++ docs/modules/ROOT/pages/how-tos/backup.adoc | 148 ++++ docs/modules/ROOT/pages/how-tos/restore.adoc | 685 +++++++++++++++++++ 3 files changed, 1503 insertions(+) diff --git a/docs/modules/ROOT/pages/how-tos/archive.adoc b/docs/modules/ROOT/pages/how-tos/archive.adoc index 7fc712753..b9f851b16 100644 --- a/docs/modules/ROOT/pages/how-tos/archive.adoc +++ b/docs/modules/ROOT/pages/how-tos/archive.adoc @@ -10,3 +10,673 @@ include::example$archive.yaml[] ---- Save the YAML above in a file named `archive.yaml` and use the `kubectl apply -f archive.yaml` command to deploy this configuration to your cluster. + +== Self-signed issuer and Mutual TLS + +If you are using self-signed issuer or using mutual tls for authenticate client, you be able to using volume for mounting cert files into backup object. 
+ +=== Self-signed issuer + +- Using with `options` feature in backend + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `env` in backend + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `options` feature in restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + restoreMethod: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `env` in restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + RESTORE_CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + restoreMethod: + s3: {} + + envFrom: + - configMapRef: + name: 
archive-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using same cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using same cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt + RESTORE_CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using different cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/custom-ca/ca.crt + volumeMounts: + - name: custom-ca-tls + mountPath: /mnt/custom-ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + 
secret: + secretName: ca-tls + defaultMode: 420 + - name: custom-ca-tls + secret: + secretName: custom-ca-tls + defaultMode: 420 +---- + +- Using different cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt + RESTORE_CA_CERT_FILE: /mnt/custom-ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + volumeMounts: + - name: custom-ca-tls + mountPath: /mnt/custom-ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 + - name: custom-ca-tls + secret: + secretName: custom-ca-tls + defaultMode: 420 +---- + +=== Self-signed issuer with mTLS + +- Using with `options` feature in backend + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `env` in backend + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: 
archive-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `options` feature in restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + restoreMethod: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `env` in restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + restoreMethod: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using same cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/tls/ca.crt 
+ clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using same cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using different cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/custom-tls/ca.crt + clientCert: /mnt/custom-tls/tls.crt + clientKey: /mnt/custom-tls/tls.key + volumeMounts: + - name: custom-client-tls + mountPath: /mnt/custom-tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 + - name: custom-client-tls + secret: + secretName: custom-client-tls + defaultMode: 420 +---- + +- Using different cert with `env` in backend and restore + +[source,yaml] 
+---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: archive-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/custom-tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/custom-tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/custom-tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: archive-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: archive-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + volumeMounts: + - name: client-custom-tls + mountPath: /mnt/custom-tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 + - name: client-custom-tls + secret: + secretName: client-custom-tls + defaultMode: 420 +---- diff --git a/docs/modules/ROOT/pages/how-tos/backup.adoc b/docs/modules/ROOT/pages/how-tos/backup.adoc index bcdf928c1..7ff013212 100644 --- a/docs/modules/ROOT/pages/how-tos/backup.adoc +++ b/docs/modules/ROOT/pages/how-tos/backup.adoc @@ -12,3 +12,151 @@ Save the YAML above in a file named `backup.yaml` and use the `kubectl apply -f TIP: To have backups run automatically at a regular interval look at xref:how-tos/schedules.adoc[schedules]. TIP: By default, all PVCs are backed up automatically. Adding the annotation `k8up.io/backup=false` to a PVC object will exclude it from all following backups. Alternatively, you can set the environment variable `BACKUP_SKIP_WITHOUT_ANNOTATION=true` if you want K8up to ignore objects without the annotation. + +== Self-signed issuer and Mutual TLS + +If you are using self-signed issuer or using mutual tls for authenticate client, you be able to using volume for mounting cert files into backup object. 
+ +=== Self-signed issuer + +- Using with `options` feature + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: backup-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `env` + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: backup-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: backup-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - configMapRef: + name: backup-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +=== Self-signed issuer with mTLS + +- Using with `options` feature + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: backup-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `env` + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: backup-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: backup-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + backend: + s3: {} + + envFrom: + - 
configMapRef: + name: backup-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- \ No newline at end of file diff --git a/docs/modules/ROOT/pages/how-tos/restore.adoc b/docs/modules/ROOT/pages/how-tos/restore.adoc index d7a49e3a7..a4a24dde2 100644 --- a/docs/modules/ROOT/pages/how-tos/restore.adoc +++ b/docs/modules/ROOT/pages/how-tos/restore.adoc @@ -339,3 +339,688 @@ kubectl get jobs kubectl logs -f jobs/resotre-job-123 ``` +== Self-signed issuer and Mutual TLS + +If you are using self-signed issuer or using mutual tls for authenticate client, you be able to using volume for mounting cert files into backup object. + +=== Self-signed issuer + +- Using with `options` feature in backend + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `env` in backend + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls 
+ secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `options` feature in restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + restoreMethod: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using with `env` in restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + RESTORE_CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + restoreMethod: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using same cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using same cert with `env` in backend and restore + 
+[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt + RESTORE_CA_CERT_FILE: /mnt/ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 +---- + +- Using different cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/custom-ca/ca.crt + volumeMounts: + - name: custom-ca-tls + mountPath: /mnt/custom-ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 + - name: custom-ca-tls + secret: + secretName: custom-ca-tls + defaultMode: 420 +---- + +- Using different cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/ca/ca.crt + RESTORE_CA_CERT_FILE: /mnt/custom-ca/ca.crt +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + 
volumeMounts: + - name: ca-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + volumeMounts: + - name: custom-ca-tls + mountPath: /mnt/custom-ca/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: ca-tls + secret: + secretName: ca-tls + defaultMode: 420 + - name: custom-ca-tls + secret: + secretName: custom-ca-tls + defaultMode: 420 +---- + +=== Self-signed issuer with mTLS + +- Using with `options` feature in backend + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/ca/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `env` in backend + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `options` feature in restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + 
successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + restoreMethod: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using with `env` in restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + restoreMethod: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using same cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + 
defaultMode: 420 +---- + +- Using same cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 +---- + +- Using different cert with `options` feature in backend and restore + +[source,yaml] +---- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + options: + caCert: /mnt/tls/ca.crt + clientCert: /mnt/tls/tls.crt + clientKey: /mnt/tls/tls.key + volumeMounts: + - name: client-tls + mountPath: /mnt/tls/ + + restoreMethod: + s3: {} + + options: + caCert: /mnt/custom-tls/ca.crt + clientCert: /mnt/custom-tls/tls.crt + clientKey: /mnt/custom-tls/tls.key + volumeMounts: + - name: custom-client-tls + mountPath: /mnt/custom-tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 + - name: custom-client-tls + secret: + secretName: custom-client-tls + defaultMode: 420 +---- + +- Using different cert with `env` in backend and restore + +[source,yaml] +---- +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: restore-cert +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/custom-tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/custom-tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/custom-tls/tls.key +--- +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: restore-test +spec: + failedJobsHistoryLimit: 2 + successfulJobsHistoryLimit: 2 + snapshot: 162e7a85acbc14de93dad31a3699331cb32187ff0d7bd2227b7c4362a1d13a42 + backend: + s3: {} + + envFrom: + - configMapRef: + name: restore-cert + volumeMounts: + - name: client-tls + mountPath: /mnt/ca/ + + restoreMethod: + s3: {} + + volumeMounts: + - name: client-custom-tls + mountPath: /mnt/custom-tls/ + + podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + volumes: + - name: client-tls + secret: + secretName: client-tls + defaultMode: 420 + - name: client-custom-tls + secret: + secretName: client-custom-tls + defaultMode: 420 +---- \ No newline at end of file From 6b659b8cb4e257c0be41620ea259ae6cc01a3fcb Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 13:14:10 +0330 Subject: [PATCH 18/38] [UPDATE] Update api-refrence according to supporting volume, volumeMount, and options Signed-off-by: poyaz --- .../ROOT/pages/references/api-reference.adoc | 375 ++++++++++++------ 1 file changed, 256 insertions(+), 119 deletions(-) diff --git a/docs/modules/ROOT/pages/references/api-reference.adoc b/docs/modules/ROOT/pages/references/api-reference.adoc index 8b19c03e6..6abdf7c30 100644 --- a/docs/modules/ROOT/pages/references/api-reference.adoc +++ b/docs/modules/ROOT/pages/references/api-reference.adoc @@ -85,24 +85,6 @@ ArchiveSchedule manages schedules for the archival service - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-schedulespec[$$ScheduleSpec$$] **** -[cols="25a,75a", options="header"] -|=== -| Field | Description -| *`backend`* 
__xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. -| *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) -| *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. -| *`restoreMethod`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$]__ | -| *`restoreFilter`* __string__ | -| *`snapshot`* __string__ | -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system -| *`schedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | -| *`concurrentRunsAllowed`* __boolean__ | -|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archivespec"] @@ -116,22 +98,6 @@ ArchiveSpec defines the desired state of Archive. 
- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archiveschedule[$$ArchiveSchedule$$] **** -[cols="25a,75a", options="header"] -|=== -| Field | Description -| *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. -| *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) -| *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. -| *`restoreMethod`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$]__ | -| *`restoreFilter`* __string__ | -| *`snapshot`* __string__ | -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system -|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-azurespec"] @@ -144,6 +110,14 @@ ArchiveSpec defines the desired state of Archive. 
- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`container`* __string__ | +| *`path`* __string__ | +| *`accountNameSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +| *`accountKeySecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-b2spec"] @@ -156,17 +130,24 @@ ArchiveSpec defines the desired state of Archive. - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`bucket`* __string__ | +| *`path`* __string__ | +| *`accountIDSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +| *`accountKeySecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend"] === Backend -Backend allows configuring several backend implementations. It is expected that users only configure one storage type. +Backend allows configuring several backend implementations. +It is expected that users only configure one storage type. 
.Appears In: **** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archiveschedule[$$ArchiveSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archivespec[$$ArchiveSpec$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupschedule[$$BackupSchedule$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupspec[$$BackupSpec$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backuptemplate[$$BackupTemplate$$] @@ -192,11 +173,32 @@ Backend allows configuring several backend implementations. It is expected that | *`swift`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-swiftspec[$$SwiftSpec$$]__ | | *`b2`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-b2spec[$$B2Spec$$]__ | | *`rest`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restserverspec[$$RestServerSpec$$]__ | +| *`options`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backendopts[$$BackendOpts$$]__ | +| *`volumeMounts`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#volumemount-v1-core[$$VolumeMount$$]__ | |=== +[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backendopts"] +=== BackendOpts + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`caCert`* __string__ | +| *`clientCert`* __string__ | +| *`clientKey`* __string__ | +|=== + + [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backup"] === Backup @@ -252,23 +254,31 @@ BackupSchedule manages schedules for the backup service | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. 
| *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. 
+KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. | *`promURL`* __string__ | PromURL sets a prometheus push URL where the backup container send metrics to -| *`statsURL`* __string__ | StatsURL sets an arbitrary URL where the restic container posts metrics and information about the snapshots to. This is in addition to the prometheus pushgateway. +| *`statsURL`* __string__ | StatsURL sets an arbitrary URL where the restic container posts metrics and +information about the snapshots to. This is in addition to the prometheus +pushgateway. | *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system -| *`schedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | -| *`concurrentRunsAllowed`* __boolean__ | |=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupspec"] === BackupSpec -BackupSpec defines a single backup. It must contain all information to connect to the backup repository when applied. If used with defaults or schedules the operator will ensure that the defaults are applied before creating the object on the API. +BackupSpec defines a single backup. It must contain all information to connect to +the backup repository when applied. If used with defaults or schedules the operator will +ensure that the defaults are applied before creating the object on the API. .Appears In: **** @@ -282,13 +292,21 @@ BackupSpec defines a single backup. It must contain all information to connect t | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. 
| *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. 
+KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. | *`promURL`* __string__ | PromURL sets a prometheus push URL where the backup container send metrics to -| *`statsURL`* __string__ | StatsURL sets an arbitrary URL where the restic container posts metrics and information about the snapshots to. This is in addition to the prometheus pushgateway. +| *`statsURL`* __string__ | StatsURL sets an arbitrary URL where the restic container posts metrics and +information about the snapshots to. This is in addition to the prometheus +pushgateway. | *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system |=== @@ -350,21 +368,26 @@ CheckSchedule manages the schedules for the checks | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. 
+| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`promURL`* __string__ | PromURL sets a prometheus push URL where the backup container send metrics to -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`schedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | -| *`concurrentRunsAllowed`* __boolean__ | +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. |=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkspec"] === CheckSpec -CheckSpec defines the desired state of Check. It needs to contain the repository information. +CheckSpec defines the desired state of Check. It needs to contain the repository +information. .Appears In: **** @@ -378,15 +401,25 @@ CheckSpec defines the desired state of Check. 
It needs to contain the repository | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`promURL`* __string__ | PromURL sets a prometheus push URL where the backup container send metrics to -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. 
+| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. |=== + + + + [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-effectiveschedule"] === EffectiveSchedule @@ -400,7 +433,7 @@ CheckSpec defines the desired state of Check. It needs to contain the repository [cols="25a,75a", options="header"] |=== | Field | Description -| *`jobType`* __JobType__ | +| *`jobType`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-jobtype[$$JobType$$]__ | | *`generatedSchedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | |=== @@ -433,11 +466,6 @@ CheckSpec defines the desired state of Check. It needs to contain the repository - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$] **** -[cols="25a,75a", options="header"] -|=== -| Field | Description -| *`PersistentVolumeClaimVolumeSource`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaimvolumesource-v1-core[$$PersistentVolumeClaimVolumeSource$$]__ | -|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-gcsspec"] @@ -450,9 +478,13 @@ CheckSpec defines the desired state of Check. 
It needs to contain the repository - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** - - - +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`bucket`* __string__ | +| *`projectIDSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +| *`accessTokenSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +|=== @@ -466,6 +498,11 @@ CheckSpec defines the desired state of Check. It needs to contain the repository - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`mountPath`* __string__ | +|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-pod"] @@ -483,7 +520,8 @@ Pod is a dummy struct to fix some code generation issues. | Field | Description | *`metadata`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta[$$ObjectMeta$$]__ | Refer to Kubernetes API documentation for fields of `metadata`. -| *`spec`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core[$$PodSpec$$]__ | Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status +| *`spec`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podspec-v1-core[$$PodSpec$$]__ | Specification of the desired behavior of the pod. +More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status |=== @@ -529,7 +567,9 @@ PreBackupPodList contains a list of PreBackupPod [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-prebackuppodspec"] === PreBackupPodSpec -PreBackupPodSpec define pods that will be launched during the backup. 
After the backup has finished (successfully or not), they should be removed again automatically by the operator. +PreBackupPodSpec define pods that will be launched during the backup. After the backup +has finished (successfully or not), they should be removed again automatically +by the operator. .Appears In: **** @@ -600,21 +640,26 @@ PruneSchedule manages the schedules for the prunes | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`retention`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-retentionpolicy[$$RetentionPolicy$$]__ | Retention sets how many backups should be kept after a forget and prune -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. 
- Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`schedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | -| *`concurrentRunsAllowed`* __boolean__ | +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. |=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-prunespec"] === PruneSpec -PruneSpec needs to contain the repository information as well as the desired retention policies. +PruneSpec needs to contain the repository information as well as the desired +retention policies. .Appears In: **** @@ -628,12 +673,18 @@ PruneSpec needs to contain the repository information as well as the desired ret | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) 
| *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`retention`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-retentionpolicy[$$RetentionPolicy$$]__ | Retention sets how many backups should be kept after a forget and prune -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. 
+| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. |=== @@ -647,6 +698,13 @@ PruneSpec needs to contain the repository information as well as the desired ret - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`url`* __string__ | +| *`userSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +| *`passwordSecretReg`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restore"] @@ -691,12 +749,11 @@ RestoreList contains a list of Restore [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod"] === RestoreMethod -RestoreMethod contains how and where the restore should happen all the settings are mutual exclusive. +RestoreMethod contains how and where the restore should happen +all the settings are mutual exclusive. 
.Appears In: **** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archiveschedule[$$ArchiveSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archivespec[$$ArchiveSpec$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreschedule[$$RestoreSchedule$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restorespec[$$RestoreSpec$$] **** @@ -706,6 +763,27 @@ RestoreMethod contains how and where the restore should happen all the settings | Field | Description | *`s3`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-s3spec[$$S3Spec$$]__ | | *`folder`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-folderrestore[$$FolderRestore$$]__ | +| *`options`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreopts[$$RestoreOpts$$]__ | +| *`volumeMounts`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#volumemount-v1-core[$$VolumeMount$$]__ | +|=== + + +[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreopts"] +=== RestoreOpts + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`caCert`* __string__ | +| *`clientCert`* __string__ | +| *`clientKey`* __string__ | |=== @@ -725,24 +803,29 @@ RestoreSchedule manages schedules for the restore service | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) 
| *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`restoreMethod`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$]__ | | *`restoreFilter`* __string__ | | *`snapshot`* __string__ | -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. 
+| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. | *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system -| *`schedule`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-scheduledefinition[$$ScheduleDefinition$$]__ | -| *`concurrentRunsAllowed`* __boolean__ | |=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restorespec"] === RestoreSpec -RestoreSpec can either contain an S3 restore point or a local one. For the local one you need to define an existing PVC. +RestoreSpec can either contain an S3 restore point or a local one. For the local +one you need to define an existing PVC. .Appears In: **** @@ -758,14 +841,20 @@ RestoreSpec can either contain an S3 restore point or a local one. For the local | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. -| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. 
+| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. | *`restoreMethod`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$]__ | | *`restoreFilter`* __string__ | | *`snapshot`* __string__ | -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. | *`tags`* __string array__ | Tags is a list of arbitrary tags that get added to the backup via Restic's tagging system |=== @@ -791,7 +880,8 @@ RestoreSpec can either contain an S3 restore point or a local one. 
For the local | *`keepMonthly`* __integer__ | | *`keepYearly`* __integer__ | | *`keepTags`* __string array__ | -| *`tags`* __string array__ | Tags is a filter on what tags the policy should be applied DO NOT CONFUSE THIS WITH KeepTags OR YOU'LL have a bad time +| *`tags`* __string array__ | Tags is a filter on what tags the policy should be applied +DO NOT CONFUSE THIS WITH KeepTags OR YOU'LL have a bad time | *`hostnames`* __string array__ | Hostnames is a filter on what hostnames the policy should be applied |=== @@ -803,8 +893,6 @@ RunnableSpec defines the fields that are necessary on the specs of all actions t .Appears In: **** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archiveschedule[$$ArchiveSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archivespec[$$ArchiveSpec$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupschedule[$$BackupSchedule$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupspec[$$BackupSpec$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkschedule[$$CheckSchedule$$] @@ -821,7 +909,42 @@ RunnableSpec defines the fields that are necessary on the specs of all actions t | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | Backend contains the restic repo where the job should backup to. | *`resources`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | Resources describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which this action shall be executed. 
-| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. Value must be positive integer if given. +| *`volumes`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec[$$RunnableVolumeSpec$$]__ | Volumes List of volumes that can be mounted by containers belonging to the pod. +| *`activeDeadlineSeconds`* __integer__ | ActiveDeadlineSeconds specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it. +Value must be positive integer if given. +|=== + + +[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablevolumespec"] +=== RunnableVolumeSpec + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupschedule[$$BackupSchedule$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupspec[$$BackupSpec$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkschedule[$$CheckSchedule$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkspec[$$CheckSpec$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-pruneschedule[$$PruneSchedule$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-prunespec[$$PruneSpec$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreschedule[$$RestoreSchedule$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restorespec[$$RestoreSpec$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-runnablespec[$$RunnableSpec$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`name`* __string__ | name of the volume. +Must be a DNS_LABEL and unique within the pod. 
+More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names +| *`persistentVolumeClaim`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#persistentvolumeclaimvolumesource-v1-core[$$PersistentVolumeClaimVolumeSource$$]__ | persistentVolumeClaimVolumeSource represents a reference to a +PersistentVolumeClaim in the same namespace. +More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims +| *`secret`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretvolumesource-v1-core[$$SecretVolumeSource$$]__ | secret represents a secret that should populate this volume. +More info: https://kubernetes.io/docs/concepts/storage/volumes#secret +| *`configMap`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#configmapvolumesource-v1-core[$$ConfigMapVolumeSource$$]__ | configMap represents a configMap that should populate this volume |=== @@ -836,6 +959,14 @@ RunnableSpec defines the fields that are necessary on the specs of all actions t - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`endpoint`* __string__ | +| *`bucket`* __string__ | +| *`accessKeyIDSecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +| *`secretAccessKeySecretRef`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#secretkeyselector-v1-core[$$SecretKeySelector$$]__ | +|=== [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-schedule"] @@ -888,12 +1019,7 @@ ScheduleDefinition is the actual cron-type expression that defines the interval .Appears In: **** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-archiveschedule[$$ArchiveSchedule$$] -- 
xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backupschedule[$$BackupSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkschedule[$$CheckSchedule$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-effectiveschedule[$$EffectiveSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-pruneschedule[$$PruneSchedule$$] -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreschedule[$$RestoreSchedule$$] - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-schedulecommon[$$ScheduleCommon$$] **** @@ -936,10 +1062,14 @@ ScheduleSpec defines the schedules for the various job types. | *`check`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-checkschedule[$$CheckSchedule$$]__ | | *`prune`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-pruneschedule[$$PruneSchedule$$]__ | | *`backend`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$]__ | -| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. - Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. -| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. KeepJobs is used property is not specified. -| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. KeepJobs is used property is not specified. +| *`keepJobs`* __integer__ | KeepJobs amount of jobs to keep for later analysis. + + +Deprecated: Use FailedJobsHistoryLimit and SuccessfulJobsHistoryLimit respectively. +| *`failedJobsHistoryLimit`* __integer__ | FailedJobsHistoryLimit amount of failed jobs to keep for later analysis. +KeepJobs is used property is not specified. +| *`successfulJobsHistoryLimit`* __integer__ | SuccessfulJobsHistoryLimit amount of successful jobs to keep for later analysis. +KeepJobs is used property is not specified. 
| *`resourceRequirementsTemplate`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#resourcerequirements-v1-core[$$ResourceRequirements$$]__ | ResourceRequirementsTemplate describes the compute resource requirements (cpu, memory, etc.) | *`podSecurityContext`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#podsecuritycontext-v1-core[$$PodSecurityContext$$]__ | PodSecurityContext describes the security context with which actions (such as backups) shall be executed. |=== @@ -991,7 +1121,8 @@ SnapshotList contains a list of Snapshot [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-snapshotspec"] === SnapshotSpec -SnapshotSpec contains all information needed about a restic snapshot so it can be restored. +SnapshotSpec contains all information needed about a restic snapshot so it +can be restored. .Appears In: **** @@ -1022,5 +1153,11 @@ SnapshotSpec contains all information needed about a restic snapshot so it can b - xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] **** +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`container`* __string__ | +| *`path`* __string__ | +|=== From bd2c880bf3d573aec5085a26b8aaaf383440d38c Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 15:01:04 +0330 Subject: [PATCH 19/38] [UPDATE] Update cert-manager to v1.14.4 Signed-off-by: poyaz --- e2e/lib/k8up.bash | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/e2e/lib/k8up.bash b/e2e/lib/k8up.bash index f80b59d7b..ac7615b42 100755 --- a/e2e/lib/k8up.bash +++ b/e2e/lib/k8up.bash @@ -222,7 +222,7 @@ given_s3_storage() { give_self_signed_issuer() { ns=${NAMESPACE=${DETIK_CLIENT_NAMESPACE}} - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.0/cert-manager.yaml + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml kubectl wait -n cert-manager --for=condition=Available 
deployment/cert-manager-webhook --timeout=120s yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/issure.yaml | kubectl apply -f - From 41825f9f4e88d2042e84cff46ea1a21a2e78ca4d Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 15:03:06 +0330 Subject: [PATCH 20/38] [ADD] Adding e2e definitaions for using env for TLS and mTls Signed-off-by: poyaz --- e2e/definitions/archive/config-mtls-env.yaml | 12 +++++ .../archive/s3-mtls-archive-mtls-env.yaml | 45 +++++++++++++++++++ e2e/definitions/backup/backup-mtls-env.yaml | 35 +++++++++++++++ e2e/definitions/backup/config-mtls-env.yaml | 9 ++++ e2e/definitions/check/check-mtls-env.yaml | 35 +++++++++++++++ e2e/definitions/check/config-mtls-env.yaml | 9 ++++ e2e/definitions/restore/config-mtls-env.yaml | 12 +++++ .../restore/s3-mtls-restore-mtls-env.yaml | 45 +++++++++++++++++++ 8 files changed, 202 insertions(+) create mode 100644 e2e/definitions/archive/config-mtls-env.yaml create mode 100644 e2e/definitions/archive/s3-mtls-archive-mtls-env.yaml create mode 100644 e2e/definitions/backup/backup-mtls-env.yaml create mode 100644 e2e/definitions/backup/config-mtls-env.yaml create mode 100644 e2e/definitions/check/check-mtls-env.yaml create mode 100644 e2e/definitions/check/config-mtls-env.yaml create mode 100644 e2e/definitions/restore/config-mtls-env.yaml create mode 100644 e2e/definitions/restore/s3-mtls-restore-mtls-env.yaml diff --git a/e2e/definitions/archive/config-mtls-env.yaml b/e2e/definitions/archive/config-mtls-env.yaml new file mode 100644 index 000000000..4837b6d19 --- /dev/null +++ b/e2e/definitions/archive/config-mtls-env.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8up-s3-mtls-archive-mtls-env + namespace: k8up-e2e-subject +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + 
RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key \ No newline at end of file diff --git a/e2e/definitions/archive/s3-mtls-archive-mtls-env.yaml b/e2e/definitions/archive/s3-mtls-archive-mtls-env.yaml new file mode 100644 index 000000000..247abaac5 --- /dev/null +++ b/e2e/definitions/archive/s3-mtls-archive-mtls-env.yaml @@ -0,0 +1,45 @@ +apiVersion: k8up.io/v1 +kind: Archive +metadata: + name: k8up-s3-mtls-archive-mtls-env + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: archive + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + envFrom: + - configMapRef: + name: k8up-s3-mtls-archive-mtls-env + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/backup/backup-mtls-env.yaml b/e2e/definitions/backup/backup-mtls-env.yaml new file mode 100644 index 000000000..39ce2c525 --- /dev/null +++ b/e2e/definitions/backup/backup-mtls-env.yaml @@ -0,0 +1,35 @@ +apiVersion: k8up.io/v1 +kind: Backup +metadata: + name: k8up-backup-mtls-env + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + envFrom: + - configMapRef: + name: k8up-backup-mtls-env + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: 
backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/backup/config-mtls-env.yaml b/e2e/definitions/backup/config-mtls-env.yaml new file mode 100644 index 000000000..e9a1f8330 --- /dev/null +++ b/e2e/definitions/backup/config-mtls-env.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8up-backup-mtls-env + namespace: k8up-e2e-subject +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key \ No newline at end of file diff --git a/e2e/definitions/check/check-mtls-env.yaml b/e2e/definitions/check/check-mtls-env.yaml new file mode 100644 index 000000000..c550e5bae --- /dev/null +++ b/e2e/definitions/check/check-mtls-env.yaml @@ -0,0 +1,35 @@ +apiVersion: k8up.io/v1 +kind: Check +metadata: + name: k8up-check-mtls-env + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + envFrom: + - configMapRef: + name: k8up-check-mtls-env + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 diff --git a/e2e/definitions/check/config-mtls-env.yaml b/e2e/definitions/check/config-mtls-env.yaml new file mode 100644 index 000000000..031af038c --- /dev/null +++ b/e2e/definitions/check/config-mtls-env.yaml @@ -0,0 +1,9 @@ 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: k8up-check-mtls-env + namespace: k8up-e2e-subject +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key \ No newline at end of file diff --git a/e2e/definitions/restore/config-mtls-env.yaml b/e2e/definitions/restore/config-mtls-env.yaml new file mode 100644 index 000000000..a9bf242ad --- /dev/null +++ b/e2e/definitions/restore/config-mtls-env.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8up-s3-mtls-restore-mtls-env + namespace: k8up-e2e-subject +data: + CA_CERT_FILE: /mnt/tls/ca.crt + CLIENT_CERT_FILE: /mnt/tls/tls.crt + CLIENT_KEY_FILE: /mnt/tls/tls.key + RESTORE_CA_CERT_FILE: /mnt/tls/ca.crt + RESTORE_CLIENT_CERT_FILE: /mnt/tls/tls.crt + RESTORE_CLIENT_KEY_FILE: /mnt/tls/tls.key \ No newline at end of file diff --git a/e2e/definitions/restore/s3-mtls-restore-mtls-env.yaml b/e2e/definitions/restore/s3-mtls-restore-mtls-env.yaml new file mode 100644 index 000000000..1d8bd4757 --- /dev/null +++ b/e2e/definitions/restore/s3-mtls-restore-mtls-env.yaml @@ -0,0 +1,45 @@ +apiVersion: k8up.io/v1 +kind: Restore +metadata: + name: k8up-s3-mtls-restore-mtls-env + namespace: k8up-e2e-subject +spec: + failedJobsHistoryLimit: 1 + successfulJobsHistoryLimit: 1 + restoreMethod: + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: restore + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + backend: + repoPasswordSecretRef: + name: backup-repo + key: password + envFrom: + - configMapRef: + name: k8up-s3-mtls-restore-mtls-env + s3: + endpoint: https://minio-mtls.minio-e2e.svc.cluster.local + bucket: backup + accessKeyIDSecretRef: + name: backup-credentials + key: username + secretAccessKeySecretRef: + name: backup-credentials + key: password + volumeMounts: + - name: minio-client-mtls + mountPath: /mnt/tls/ + podSecurityContext: + 
fsGroup: $ID + runAsUser: $ID + volumes: + - name: minio-client-mtls + secret: + secretName: minio-client-mtls + defaultMode: 420 From da60a0b0bde4d44f1bd94cb92698749e01440c1a Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 15:03:13 +0330 Subject: [PATCH 21/38] ADD] Adding e2e test over using env for TLS and mTls Also fixing bug in get lentgh of archive object in minio-mc Signed-off-by: poyaz --- e2e/test-10-self-signed-tls.bats | 132 ++++++++++++++++++++++++++++++- 1 file changed, 128 insertions(+), 4 deletions(-) diff --git a/e2e/test-10-self-signed-tls.bats b/e2e/test-10-self-signed-tls.bats index 6fa1b5f4e..36db94037 100644 --- a/e2e/test-10-self-signed-tls.bats +++ b/e2e/test-10-self-signed-tls.bats @@ -89,6 +89,45 @@ DEBUG_DETIK="true" [ "${output}" = "${expected_content}" ] } +@test "Given a PVC, When creating a Backup (mTLS with env) of an app, Then expect Restic repository - using self-signed issuer" { + expected_content="expected content for mtls: $(timestamp)" + expected_filename="expected_filename.txt" + + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_a_subject "${expected_filename}" "${expected_content}" + + kubectl apply -f definitions/secrets + kubectl apply -f definitions/backup/config-mtls-env.yaml + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-mtls-env.yaml | kubectl apply -f - + + try "at most 10 times every 5s to get backup named 'k8up-backup-mtls-env' and verify that '.status.started' is 'true'" + verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-mtls-env' '.status.active' 1 true + + wait_until backup/k8up-backup-mtls-env completed + + run restic snapshots + + echo "---BEGIN restic snapshots output---" + echo "${output}" + echo "---END---" + + echo -n "Number of Snapshots >= 1? 
" + jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created + + run get_latest_snap + + run restic dump "${output}" "/data/subject-pvc/${expected_filename}" + + echo "---BEGIN actual ${expected_filename}---" + echo "${output}" + echo "---END---" + + [ "${output}" = "${expected_content}" ] +} + ### End backup section ### Start restore to pvc section @@ -251,6 +290,30 @@ DEBUG_DETIK="true" expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" } +@test "Given an existing Restic repository, When creating a Restore (mTLS with env), Then Restore to S3 (mTLS with env) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + kubectl apply -f definitions/restore/config-mtls-env.yaml + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls-env.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls-env' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls-env' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-mtls-restore-mtls-env completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-mtls-env'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} + ### End restore to s3 section ### Start archive to s3 section @@ -286,7 +349,7 @@ DEBUG_DETIK="true" run mc ls minio/archive echo "---BEGIN total archives 
output---" - total_archives=$(echo -e "${output}" | wc -l) + total_archives=$(echo -n -e "${output}" | wc -l) echo "${total_archives}" echo "---END---" @@ -324,7 +387,7 @@ DEBUG_DETIK="true" run mc ls minio/archive echo "---BEGIN total archives output---" - total_archives=$(echo -e "${output}" | wc -l) + total_archives=$(echo -n -e "${output}" | wc -l) echo "${total_archives}" echo "---END---" @@ -362,7 +425,7 @@ DEBUG_DETIK="true" run mc ls minio/archive echo "---BEGIN total archives output---" - total_archives=$(echo -e "${output}" | wc -l) + total_archives=$(echo -n -e "${output}" | wc -l) echo "${total_archives}" echo "---END---" @@ -400,7 +463,46 @@ DEBUG_DETIK="true" run mc ls minio/archive echo "---BEGIN total archives output---" - total_archives=$(echo -e "${output}" | wc -l) + total_archives=$(echo -n -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} + +@test "Given an existing Restic repository, When creating a Archive (mTLS with env), Then Restore to S3 (mTLS with env) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + kubectl apply -f definitions/archive/config-mtls-env.yaml + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls-env.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls-env' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls-env' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-mtls-archive-mtls-env completed + 
verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls-env'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo "---BEGIN total archives output---" + total_archives=$(echo -n -e "${output}" | wc -l) echo "${total_archives}" echo "---END---" @@ -453,4 +555,26 @@ DEBUG_DETIK="true" verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-mtls'" } +@test "Given a PVC, When creating a Check (mTLS with env) of an app, Then expect Restic repository - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_backup "${expected_filename}" "${expected_content}" + + # Check + kubectl apply -f definitions/secrets + kubectl apply -f definitions/check/config-mtls-env.yaml + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-mtls-env.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Check named 'k8up-check-mtls-env' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-check-mtls-env' and verify that '.status.active' is '1'" + + wait_until check/k8up-check-mtls-env completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-mtls-env'" +} + ### End check section From c668b2585cc9da21b1faa4df81ddac74a9914858 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 16:53:54 +0330 Subject: [PATCH 22/38] [FIX] Fixing integration test for restic s3. 
Missing CaCert arguments Signed-off-by: poyaz --- cmd/restic/integration_test.go | 48 ++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 80775d479..9532ceea4 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -59,9 +59,11 @@ func newTestErrorChannel() chan error { func (w *webhookserver) runWebServer(t *testing.T) { mux := http.NewServeMux() - mux.HandleFunc("/", func(wr http.ResponseWriter, r *http.Request) { - w.jsonData, _ = io.ReadAll(r.Body) - }) + mux.HandleFunc( + "/", func(wr http.ResponseWriter, r *http.Request) { + w.jsonData, _ = io.ReadAll(r.Body) + }, + ) srv := &testServer{ Server: http.Server{ @@ -110,9 +112,11 @@ func initTest(t *testing.T) *testEnvironment { cleanupDirs(t) createTestFiles(t) - t.Cleanup(func() { - cleanupDirs(t) - }) + t.Cleanup( + func() { + cleanupDirs(t) + }, + ) webhook := startWebhookWebserver(t, ctx) s3client := connectToS3Server(t, ctx) @@ -131,7 +135,7 @@ func initTest(t *testing.T) *testEnvironment { func connectToS3Server(t *testing.T, ctx context.Context) *s3.Client { repo := getS3Repo() - s3client := s3.New(repo, os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")) + s3client := s3.New(repo, os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY"), s3.Cert{}) err := s3client.Connect(ctx) require.NoErrorf(t, err, "Unable to connect to S3 repo '%s'", repo) @@ -140,10 +144,12 @@ func connectToS3Server(t *testing.T, ctx context.Context) *s3.Client { _ = s3client.DeleteBucket(ctx) t.Logf("Ensured that the bucket '%s' does not exist", repo) - t.Cleanup(func() { - _ = s3client.DeleteBucket(ctx) - t.Logf("Removing the bucket '%s'", repo) - }) + t.Cleanup( + func() { + _ = s3client.DeleteBucket(ctx) + t.Logf("Removing the bucket '%s'", repo) + }, + ) return s3client } @@ -151,15 +157,17 @@ func startWebhookWebserver(t *testing.T, ctx context.Context) 
*webhookserver { webhook := &webhookserver{} webhook.runWebServer(t) t.Logf("Started webserver on '%s'", webhook.srv.Addr) - t.Cleanup(func() { - if webhook.srv == nil { - t.Log("Webserver not running.") - return - } + t.Cleanup( + func() { + if webhook.srv == nil { + t.Log("Webserver not running.") + return + } - t.Logf("Stopping the webserver on '%s'", webhook.srv.Addr) - webhook.srv.Shutdown(ctx) - }) + t.Logf("Stopping the webserver on '%s'", webhook.srv.Addr) + webhook.srv.Shutdown(ctx) + }, + ) return webhook } @@ -211,7 +219,7 @@ func testBackup(t *testing.T) *testEnvironment { } func testCheckS3Restore(t *testing.T, ctx context.Context) { - s3c := s3.New(os.Getenv("RESTORE_S3ENDPOINT"), os.Getenv("RESTORE_ACCESSKEYID"), os.Getenv("RESTORE_SECRETACCESSKEY")) + s3c := s3.New(os.Getenv("RESTORE_S3ENDPOINT"), os.Getenv("RESTORE_ACCESSKEYID"), os.Getenv("RESTORE_SECRETACCESSKEY"), s3.Cert{}) err := s3c.Connect(ctx) require.NoError(t, err) files, err := s3c.ListObjects(ctx) From 295e5bf5d0cdc7c8fb43a9e383ea0f1b3a4339c6 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 16:55:12 +0330 Subject: [PATCH 23/38] [ADD] Adding variable GO_EXEC in Makefile to choose different versions of Golang Signed-off-by: poyaz --- envtest/integration.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/envtest/integration.mk b/envtest/integration.mk index 45e5e8314..09e117aca 100644 --- a/envtest/integration.mk +++ b/envtest/integration.mk @@ -6,7 +6,7 @@ clean_targets += .envtest-clean # Prepare binary $(setup_envtest_bin): export GOBIN = $(go_bin) $(setup_envtest_bin): | $(go_bin) - go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest + $(GO_EXEC) install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest .PHONY: integration-test # operator module { @@ -30,7 +30,7 @@ integration-test: $(setup_envtest_bin) generate restic-integration-test-setup .e $(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use '$(ENVTEST_K8S_VERSION)!' 
@chmod -R +w $(go_bin)/k8s export KUBEBUILDER_ASSETS="$$($(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use -i -p path '$(ENVTEST_K8S_VERSION)!')" && \ - go test -tags=integration -coverprofile cover.out -covermode atomic ./... + $(GO_EXEC) test -tags=integration -coverprofile cover.out -covermode atomic ./... $(envtest_crd_dir): @mkdir -p $@ From 9f776fe3570f2af634d960fbbe8d7c46c08f4930 Mon Sep 17 00:00:00 2001 From: poyaz Date: Sat, 23 Mar 2024 17:05:19 +0330 Subject: [PATCH 24/38] [FIX] Fix test for expected args Signed-off-by: poyaz --- operator/restorecontroller/executor_test.go | 54 ++++++++++++--------- 1 file changed, 31 insertions(+), 23 deletions(-) diff --git a/operator/restorecontroller/executor_test.go b/operator/restorecontroller/executor_test.go index 1956aaa88..454735886 100644 --- a/operator/restorecontroller/executor_test.go +++ b/operator/restorecontroller/executor_test.go @@ -4,11 +4,12 @@ import ( "context" "testing" - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" - "github.com/k8up-io/k8up/v2/operator/job" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" + + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" + "github.com/k8up-io/k8up/v2/operator/job" ) type PVCExpectation struct { @@ -131,15 +132,17 @@ func TestRestore_setupEnvVars(t *testing.T) { } for name, tt := range tests { - t.Run(name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - envVars := e.setupEnvVars(context.TODO(), tt.GivenResource) + t.Run( + name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + envVars := e.setupEnvVars(context.TODO(), tt.GivenResource) - actualEnvVars, actualSecretKeyRefs := extractVarsAndSecretRefs(envVars) + actualEnvVars, actualSecretKeyRefs := extractVarsAndSecretRefs(envVars) - assert.Equal(t, actualEnvVars, tt.ExpectedEnvVars) - assert.Equal(t, actualSecretKeyRefs, tt.ExpectedSecretKeyRefs) - }) + assert.Equal(t, actualEnvVars, tt.ExpectedEnvVars) + assert.Equal(t, 
actualSecretKeyRefs, tt.ExpectedSecretKeyRefs) + }, + ) } } @@ -183,13 +186,15 @@ func TestRestore_volumeConfig(t *testing.T) { } for name, tt := range tests { - t.Run(name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - volumes, mounts := e.volumeConfig(tt.GivenResource) + t.Run( + name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + volumes, mounts := e.volumeConfig(tt.GivenResource) - assertVolumes(t, tt.ExpectedPVC, volumes) - assertVolumeMounts(t, tt.ExpectedVolumeMount, mounts) - }) + assertVolumes(t, tt.ExpectedPVC, volumes) + assertVolumeMounts(t, tt.ExpectedVolumeMount, mounts) + }, + ) } } @@ -226,15 +231,16 @@ func TestRestore_args(t *testing.T) { }{ "givenS3RestoreResource_whenArgs_expectS3RestoreType": { GivenResource: newS3RestoreResource(), - ExpectedArgs: []string{"-restore", "-restoreType", "s3"}, + ExpectedArgs: []string{"-varDir", "/k8up", "-restore", "-restoreType", "s3"}, }, "givenFolderRestoreResource_whenArgs_expectFolderRestoreType": { GivenResource: newFolderRestoreResource(), - ExpectedArgs: []string{"-restore", "-restoreType", "folder"}, + ExpectedArgs: []string{"-varDir", "/k8up", "-restore", "-restoreType", "folder"}, }, "givenFolderRestoreResourceWithAdditionalArguments_whenBuildRestoreObject_expectJobResource": { GivenResource: newFilteredFolderRestoreResource(), ExpectedArgs: []string{ + "-varDir", "/k8up", "-restore", "--tag", "testtag", "--tag", "another", @@ -246,12 +252,14 @@ func TestRestore_args(t *testing.T) { } for name, tt := range tests { - t.Run(name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - args, err := e.setupArgs(tt.GivenResource) + t.Run( + name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + args, err := e.setupArgs(tt.GivenResource) - require.NoError(t, err) - assert.Equal(t, tt.ExpectedArgs, args) - }) + require.NoError(t, err) + assert.Equal(t, tt.ExpectedArgs, args) + }, + ) } } From f6b0f1259cac555c07b48e3d646eae27eddadeca Mon Sep 17 00:00:00 
2001 From: Pooya Azarpour Date: Sat, 6 Apr 2024 11:20:33 +0330 Subject: [PATCH 25/38] [DELETE] Delete command "sleep 3" Signed-off-by: Pooya Azarpour --- e2e/lib/k8up.bash | 3 --- 1 file changed, 3 deletions(-) diff --git a/e2e/lib/k8up.bash b/e2e/lib/k8up.bash index ac7615b42..0f4f574c9 100755 --- a/e2e/lib/k8up.bash +++ b/e2e/lib/k8up.bash @@ -56,7 +56,6 @@ clear_pv_data() { # It's very unreliable unfortunately. So running the pod, waiting and getting the # log output is a lot less prone for race conditions. restic() { - sleep 3 podname="restic-$(timestamp)" kubectl run "$podname" \ --restart Never \ @@ -78,7 +77,6 @@ restic() { } mc() { - sleep 3 podname="minio-mc-$(timestamp)" kubectl run "$podname" \ --restart Never \ @@ -295,7 +293,6 @@ given_an_existing_backup() { if [ ! -z "${output}" ]; then break fi - sleep 3 done # shellcheck disable=SC2154 From 8713c813f673929e959760c342e9be0aba3dfff9 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Sat, 6 Apr 2024 13:40:33 +0330 Subject: [PATCH 26/38] [UPDATE] Formatting go files to old style (Remove idea customization formatter) Signed-off-by: Pooya Azarpour --- api/v1/backend.go | 6 +- cmd/restic/integration_test.go | 44 +++--- operator/archivecontroller/executor.go | 110 ++++++--------- operator/backupcontroller/backup_utils.go | 87 ++++++------ operator/cfg/config.go | 2 + operator/checkcontroller/executor.go | 82 +++++------- operator/prunecontroller/executor.go | 91 ++++++------- operator/restorecontroller/executor.go | 140 ++++++++------------ operator/restorecontroller/executor_test.go | 49 +++---- restic/cli/restic.go | 2 +- restic/s3/client.go | 5 +- 11 files changed, 251 insertions(+), 367 deletions(-) diff --git a/api/v1/backend.go b/api/v1/backend.go index dd60cffab..27f152642 100644 --- a/api/v1/backend.go +++ b/api/v1/backend.go @@ -92,11 +92,7 @@ func IsNil(v interface{}) bool { return v == nil || (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) } -func addEnvVarFromSecret(
vars map[string]*corev1.EnvVarSource, - key string, - ref *corev1.SecretKeySelector, -) { +func addEnvVarFromSecret(vars map[string]*corev1.EnvVarSource, key string, ref *corev1.SecretKeySelector) { if ref != nil { vars[key] = &corev1.EnvVarSource{ SecretKeyRef: ref, diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 9532ceea4..90d33dbd6 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -59,11 +59,9 @@ func newTestErrorChannel() chan error { func (w *webhookserver) runWebServer(t *testing.T) { mux := http.NewServeMux() - mux.HandleFunc( - "/", func(wr http.ResponseWriter, r *http.Request) { - w.jsonData, _ = io.ReadAll(r.Body) - }, - ) + mux.HandleFunc("/", func(wr http.ResponseWriter, r *http.Request) { + w.jsonData, _ = io.ReadAll(r.Body) + }) srv := &testServer{ Server: http.Server{ @@ -112,11 +110,9 @@ func initTest(t *testing.T) *testEnvironment { cleanupDirs(t) createTestFiles(t) - t.Cleanup( - func() { - cleanupDirs(t) - }, - ) + t.Cleanup(func() { + cleanupDirs(t) + }) webhook := startWebhookWebserver(t, ctx) s3client := connectToS3Server(t, ctx) @@ -144,12 +140,10 @@ func connectToS3Server(t *testing.T, ctx context.Context) *s3.Client { _ = s3client.DeleteBucket(ctx) t.Logf("Ensured that the bucket '%s' does not exist", repo) - t.Cleanup( - func() { - _ = s3client.DeleteBucket(ctx) - t.Logf("Removing the bucket '%s'", repo) - }, - ) + t.Cleanup(func() { + _ = s3client.DeleteBucket(ctx) + t.Logf("Removing the bucket '%s'", repo) + }) return s3client } @@ -157,17 +151,15 @@ func startWebhookWebserver(t *testing.T, ctx context.Context) *webhookserver { webhook := &webhookserver{} webhook.runWebServer(t) t.Logf("Started webserver on '%s'", webhook.srv.Addr) - t.Cleanup( - func() { - if webhook.srv == nil { - t.Log("Webserver not running.") - return - } + t.Cleanup(func() { + if webhook.srv == nil { + t.Log("Webserver not running.") + return + } - t.Logf("Stopping the webserver on '%s'", 
webhook.srv.Addr) - webhook.srv.Shutdown(ctx) - }, - ) + t.Logf("Stopping the webserver on '%s'", webhook.srv.Addr) + webhook.srv.Shutdown(ctx) + }) return webhook } diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index 02309048e..487c77345 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -3,14 +3,13 @@ package archivecontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/k8up-io/k8up/v2/operator/executor" - "github.com/k8up-io/k8up/v2/operator/utils" - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -48,33 +47,25 @@ func (a *ArchiveExecutor) Execute(ctx context.Context) error { batchJob.Name = a.jobName() batchJob.Namespace = a.archive.Namespace - _, err := controllerutil.CreateOrUpdate( - ctx, a.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, a.archive, a.Config) - if mutateErr != nil { - return mutateErr - } + _, err := controllerutil.CreateOrUpdate(ctx, a.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, a.archive, a.Config) + if mutateErr != nil { + return mutateErr + } - batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) - a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() + batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) + a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + 
batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() - args, argsErr := a.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + args, argsErr := a.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }, - ) + return argsErr + }) if err != nil { log.Error(err, "could not create job") - a.SetConditionFalseWithMessage( - ctx, - k8upv1.ConditionReady, - k8upv1.ReasonCreationFailed, - "could not create job: %v", - err, - ) + a.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) return err } @@ -96,10 +87,7 @@ func (a *ArchiveExecutor) setupArgs() ([]string, error) { return args, nil } -func (a *ArchiveExecutor) setupEnvVars( - ctx context.Context, - archive *k8upv1.Archive, -) []corev1.EnvVar { +func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Archive) []corev1.EnvVar { log := controllerruntime.LoggerFrom(ctx) vars := executor.NewEnvVarConverter() @@ -129,14 +117,7 @@ func (a *ArchiveExecutor) setupEnvVars( err := vars.Merge(executor.DefaultEnv(a.Obj.GetNamespace())) if err != nil { - log.Error( - err, - "error while merging the environment variables", - "name", - a.Obj.GetName(), - "namespace", - a.Obj.GetNamespace(), - ) + log.Error(err, "error while merging the environment variables", "name", a.Obj.GetName(), "namespace", a.Obj.GetNamespace()) } return vars.Convert() @@ -154,35 +135,28 @@ func (a *ArchiveExecutor) appendOptionsArgs() []string { args = append(args, []string{"-caCert", a.archive.Spec.Backend.Options.CACert}...) 
} if a.archive.Spec.Backend.Options.ClientCert != "" && a.archive.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "-clientCert", - a.archive.Spec.Backend.Options.ClientCert, - "-clientKey", - a.archive.Spec.Backend.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "-clientCert", + a.archive.Spec.Backend.Options.ClientCert, + "-clientKey", + a.archive.Spec.Backend.Options.ClientKey, + } + args = append(args, addMoreArgs...) } } if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreMethod.Options != nil { if a.archive.Spec.RestoreMethod.Options.CACert != "" { - args = append( - args, - []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}..., - ) + args = append(args, []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}...) } if a.archive.Spec.RestoreMethod.Options.ClientCert != "" && a.archive.Spec.RestoreMethod.Options.ClientKey != "" { - args = append( - args, - []string{ - "-restoreClientCert", - a.archive.Spec.RestoreMethod.Options.ClientCert, - "-restoreClientKey", - a.archive.Spec.RestoreMethod.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "-restoreClientCert", + a.archive.Spec.RestoreMethod.Options.ClientCert, + "-restoreClientKey", + a.archive.Spec.RestoreMethod.Options.ClientKey, + } + args = append(args, addMoreArgs...) 
} } @@ -215,12 +189,11 @@ func (a *ArchiveExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append( - moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }, - ) + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) } return moreVolumes @@ -253,8 +226,11 @@ func (a *ArchiveExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { } } - ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} - volumeMount = append(volumeMount, ku8pVolumeMount) + addVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: cfg.Config.PodVarDir, + } + volumeMount = append(volumeMount, addVolumeMount) return volumeMount } diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index 796ee4527..03828cb72 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -3,16 +3,14 @@ package backupcontroller import ( "context" "fmt" - "path" - + "github.com/k8up-io/k8up/v2/operator/executor" + "github.com/k8up-io/k8up/v2/operator/utils" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "path" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/k8up-io/k8up/v2/operator/executor" - "github.com/k8up-io/k8up/v2/operator/utils" - "github.com/k8up-io/k8up/v2/operator/cfg" ) @@ -34,10 +32,7 @@ func (b *BackupExecutor) newVolumeMounts(claims []corev1.Volume) []corev1.Volume return mounts } -func containsAccessMode( - s []corev1.PersistentVolumeAccessMode, - e corev1.PersistentVolumeAccessMode, -) bool { +func containsAccessMode(s []corev1.PersistentVolumeAccessMode, e corev1.PersistentVolumeAccessMode) bool { for _, a := range s { if a == e { return true @@ -50,11 +45,9 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err sa := 
&corev1.ServiceAccount{} sa.Name = cfg.Config.ServiceAccount sa.Namespace = b.backup.Namespace - _, err := controllerruntime.CreateOrUpdate( - ctx, b.Config.Client, sa, func() error { - return nil - }, - ) + _, err := controllerruntime.CreateOrUpdate(ctx, b.Config.Client, sa, func() error { + return nil + }) if err != nil { return err } @@ -65,23 +58,21 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err roleBinding := &rbacv1.RoleBinding{} roleBinding.Name = cfg.Config.PodExecRoleName + "-namespaced" roleBinding.Namespace = b.backup.Namespace - _, err = controllerruntime.CreateOrUpdate( - ctx, b.Config.Client, roleBinding, func() error { - roleBinding.Subjects = []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: b.backup.Namespace, - Name: sa.Name, - }, - } - roleBinding.RoleRef = rbacv1.RoleRef{ - Kind: "ClusterRole", - Name: "k8up-executor", - APIGroup: "rbac.authorization.k8s.io", - } - return nil - }, - ) + _, err = controllerruntime.CreateOrUpdate(ctx, b.Config.Client, roleBinding, func() error { + roleBinding.Subjects = []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Namespace: b.backup.Namespace, + Name: sa.Name, + }, + } + roleBinding.RoleRef = rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "k8up-executor", + APIGroup: "rbac.authorization.k8s.io", + } + return nil + }) return err } @@ -145,12 +136,11 @@ func (b *BackupExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append( - moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }, - ) + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) } return moreVolumes @@ -163,8 +153,11 @@ func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { volumeMount = *b.backup.Spec.Backend.VolumeMounts } - ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} - volumeMount = append(volumeMount, 
ku8pVolumeMount) + addVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: cfg.Config.PodVarDir, + } + volumeMount = append(volumeMount, addVolumeMount) return volumeMount } @@ -180,15 +173,13 @@ func (b *BackupExecutor) appendOptionsArgs() []string { args = append(args, []string{"-caCert", b.backup.Spec.Backend.Options.CACert}...) } if b.backup.Spec.Backend.Options.ClientCert != "" && b.backup.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "-clientCert", - b.backup.Spec.Backend.Options.ClientCert, - "-clientKey", - b.backup.Spec.Backend.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "-clientCert", + b.backup.Spec.Backend.Options.ClientCert, + "-clientKey", + b.backup.Spec.Backend.Options.ClientKey, + } + args = append(args, addMoreArgs...) } return args diff --git a/operator/cfg/config.go b/operator/cfg/config.go index afa52a96f..bf76d5665 100644 --- a/operator/cfg/config.go +++ b/operator/cfg/config.go @@ -2,6 +2,7 @@ package cfg import ( "fmt" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" ) @@ -81,6 +82,7 @@ type Configuration struct { EnableLeaderElection bool OperatorNamespace string + // the var data dir for read/write k8up data or temp file in the backup pod PodVarDir string // Allows to pass options to restic, see https://restic.readthedocs.io/en/stable/manual_rest.html?highlight=--option#usage-help diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index a97e10f1d..d228d2a8a 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -3,14 +3,12 @@ package checkcontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/executor" "github.com/k8up-io/k8up/v2/operator/utils" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" - "github.com/k8up-io/k8up/v2/operator/executor" - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" 
"github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -48,33 +46,26 @@ func (c *CheckExecutor) Execute(ctx context.Context) error { batchJob.Name = c.jobName() batchJob.Namespace = c.check.Namespace - _, err := controllerruntime.CreateOrUpdate( - ctx, c.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, c.check, c.Config) - if mutateErr != nil { - return mutateErr - } + _, err := controllerruntime.CreateOrUpdate(ctx, c.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, c.check, c.Config) + if mutateErr != nil { + return mutateErr + } - batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) - c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() - batchJob.Labels[job.K8upExclusive] = "true" + batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) + c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() + batchJob.Labels[job.K8upExclusive] = "true" - args, argsErr := c.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + args, argsErr := c.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }, + return argsErr + }, ) if err != nil { - c.SetConditionFalseWithMessage( - ctx, - k8upv1.ConditionReady, - k8upv1.ReasonCreationFailed, - "could not create job: %v", - err, - ) + c.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) return err } c.SetStarted(ctx, "the job '%v/%v' was created", batchJob.Namespace, batchJob.Name) @@ -109,14 +100,7 @@ func (c *CheckExecutor) setupEnvVars(ctx context.Context) 
[]corev1.EnvVar { err := vars.Merge(executor.DefaultEnv(c.Obj.GetNamespace())) if err != nil { - log.Error( - err, - "error while merging the environment variables", - "name", - c.Obj.GetName(), - "namespace", - c.Obj.GetNamespace(), - ) + log.Error(err, "error while merging the environment variables", "name", c.Obj.GetName(), "namespace", c.Obj.GetNamespace()) } return vars.Convert() @@ -136,15 +120,13 @@ func (c *CheckExecutor) appendOptionsArgs() []string { args = append(args, []string{"-caCert", c.check.Spec.Backend.Options.CACert}...) } if c.check.Spec.Backend.Options.ClientCert != "" && c.check.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "-clientCert", - c.check.Spec.Backend.Options.ClientCert, - "-clientKey", - c.check.Spec.Backend.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "-clientCert", + c.check.Spec.Backend.Options.ClientCert, + "-clientKey", + c.check.Spec.Backend.Options.ClientKey, + } + args = append(args, addMoreArgs...) } return args @@ -176,12 +158,11 @@ func (c *CheckExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append( - moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }, - ) + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) } return moreVolumes @@ -194,7 +175,10 @@ func (c *CheckExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { volumeMount = *c.check.Spec.Backend.VolumeMounts } - ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + ku8pVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: cfg.Config.PodVarDir, + } volumeMount = append(volumeMount, ku8pVolumeMount) return volumeMount diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index 5caded3cc..7c71ced5a 100644 --- a/operator/prunecontroller/executor.go +++ b/operator/prunecontroller/executor.go @@ -5,15 
+5,13 @@ import ( "strconv" "strings" + "github.com/k8up-io/k8up/v2/operator/executor" "github.com/k8up-io/k8up/v2/operator/utils" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/k8up-io/k8up/v2/operator/executor" - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -41,34 +39,26 @@ func (p *PruneExecutor) Execute(ctx context.Context) error { batchJob.Name = p.jobName() batchJob.Namespace = p.prune.Namespace - _, err := controllerutil.CreateOrUpdate( - ctx, p.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, p.prune, p.Config) - if mutateErr != nil { - return mutateErr - } - - batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) - batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount - p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() - batchJob.Labels[job.K8upExclusive] = "true" - - args, argsErr := p.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args - - return argsErr - }, - ) + _, err := controllerutil.CreateOrUpdate(ctx, p.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, p.prune, p.Config) + if mutateErr != nil { + return mutateErr + } + + batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) + batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount + p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() + batchJob.Labels[job.K8upExclusive] 
= "true" + + args, argsErr := p.setupArgs() + batchJob.Spec.Template.Spec.Containers[0].Args = args + + return argsErr + }) if err != nil { - p.SetConditionFalseWithMessage( - ctx, - k8upv1.ConditionReady, - k8upv1.ReasonCreationFailed, - "could not create job: %v", - err, - ) + p.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) return err } @@ -146,14 +136,7 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ err := vars.Merge(executor.DefaultEnv(p.Obj.GetNamespace())) if err != nil { - log.Error( - err, - "error while merging the environment variables", - "name", - p.Obj.GetName(), - "namespace", - p.Obj.GetNamespace(), - ) + log.Error(err, "error while merging the environment variables", "name", p.Obj.GetName(), "namespace", p.Obj.GetNamespace()) } return vars.Convert() @@ -169,15 +152,13 @@ func (p *PruneExecutor) appendOptionsArgs() []string { args = append(args, []string{"-caCert", p.prune.Spec.Backend.Options.CACert}...) } if p.prune.Spec.Backend.Options.ClientCert != "" && p.prune.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "-clientCert", - p.prune.Spec.Backend.Options.ClientCert, - "-clientKey", - p.prune.Spec.Backend.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "-clientCert", + p.prune.Spec.Backend.Options.ClientCert, + "-clientKey", + p.prune.Spec.Backend.Options.ClientKey, + } + args = append(args, addMoreArgs...) 
} return args @@ -209,12 +190,11 @@ func (p *PruneExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append( - moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }, - ) + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) } return moreVolumes @@ -227,7 +207,10 @@ func (p *PruneExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { volumeMount = *p.prune.Spec.Backend.VolumeMounts } - ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} + ku8pVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: cfg.Config.PodVarDir, + } volumeMount = append(volumeMount, ku8pVolumeMount) return volumeMount diff --git a/operator/restorecontroller/executor.go b/operator/restorecontroller/executor.go index c86c29f14..2cd619a2a 100644 --- a/operator/restorecontroller/executor.go +++ b/operator/restorecontroller/executor.go @@ -5,15 +5,13 @@ import ( "errors" "fmt" + "github.com/k8up-io/k8up/v2/operator/executor" "github.com/k8up-io/k8up/v2/operator/utils" - batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/k8up-io/k8up/v2/operator/executor" - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" "github.com/k8up-io/k8up/v2/operator/cfg" "github.com/k8up-io/k8up/v2/operator/job" @@ -53,13 +51,7 @@ func (r *RestoreExecutor) Execute(ctx context.Context) error { restoreJob, err := r.createRestoreObject(ctx, restore) if err != nil { log.Error(err, "unable to create or update restore object") - r.SetConditionFalseWithMessage( - ctx, - k8upv1.ConditionReady, - k8upv1.ReasonCreationFailed, - "unable to create restore object: %v", - err, - ) + r.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "unable to create restore object: %v", err) return nil } @@ 
-79,28 +71,23 @@ func (r *RestoreExecutor) createRestoreObject( batchJob := &batchv1.Job{} batchJob.Name = r.jobName() batchJob.Namespace = restore.Namespace - _, err := controllerutil.CreateOrUpdate( - ctx, r.Client, batchJob, func() error { - mutateErr := job.MutateBatchJob(batchJob, restore, r.Config) - if mutateErr != nil { - return mutateErr - } - batchJob.Labels[job.K8upExclusive] = "true" - batchJob.Spec.Template.Spec.Containers[0].Env = r.setupEnvVars(ctx, restore) - restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - - volumes, volumeMounts := r.volumeConfig(restore) - batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append( - volumeMounts, - r.attachMoreVolumeMounts()..., - ) - - args, argsErr := r.setupArgs(restore) - batchJob.Spec.Template.Spec.Containers[0].Args = args - return argsErr - }, - ) + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, batchJob, func() error { + mutateErr := job.MutateBatchJob(batchJob, restore, r.Config) + if mutateErr != nil { + return mutateErr + } + batchJob.Labels[job.K8upExclusive] = "true" + batchJob.Spec.Template.Spec.Containers[0].Env = r.setupEnvVars(ctx, restore) + restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) + + volumes, volumeMounts := r.volumeConfig(restore) + batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append(volumeMounts, r.attachMoreVolumeMounts()...) 
+ + args, argsErr := r.setupArgs(restore) + batchJob.Spec.Template.Spec.Containers[0].Args = args + return argsErr + }) return batchJob, err } @@ -129,11 +116,7 @@ func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { case restore.Spec.RestoreMethod.S3 != nil: args = append(args, "-restoreType", "s3") default: - return nil, fmt.Errorf( - "undefined restore method (-restoreType) on '%v/%v'", - restore.Namespace, - restore.Name, - ) + return nil, fmt.Errorf("undefined restore method (-restoreType) on '%v/%v'", restore.Namespace, restore.Name) } args = append(args, r.appendOptionsArgs()...) @@ -147,15 +130,13 @@ func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ( ) { volumes := make([]corev1.Volume, 0) if restore.Spec.RestoreMethod.S3 == nil { - volumes = append( - volumes, - corev1.Volume{ - Name: restore.Spec.RestoreMethod.Folder.ClaimName, - VolumeSource: corev1.VolumeSource{ - PersistentVolumeClaim: restore.Spec.RestoreMethod.Folder.PersistentVolumeClaimVolumeSource, - }, + addVolume := corev1.Volume{ + Name: restore.Spec.RestoreMethod.Folder.ClaimName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: restore.Spec.RestoreMethod.Folder.PersistentVolumeClaimVolumeSource, }, - ) + } + volumes = append(volumes, addVolume) } mounts := make([]corev1.VolumeMount, 0) @@ -170,10 +151,7 @@ func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ( return volumes, mounts } -func (r *RestoreExecutor) setupEnvVars( - ctx context.Context, - restore *k8upv1.Restore, -) []corev1.EnvVar { +func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Restore) []corev1.EnvVar { log := controllerruntime.LoggerFrom(ctx) vars := executor.NewEnvVarConverter() @@ -199,14 +177,7 @@ func (r *RestoreExecutor) setupEnvVars( err := vars.Merge(executor.DefaultEnv(r.Obj.GetNamespace())) if err != nil { - log.Error( - err, - "error while merging the environment variables", - "name", - r.Obj.GetName(), - "namespace", - 
r.Obj.GetNamespace(), - ) + log.Error(err, "error while merging the environment variables", "name", r.Obj.GetName(), "namespace", r.Obj.GetNamespace()) } return vars.Convert() @@ -220,35 +191,28 @@ func (r *RestoreExecutor) appendOptionsArgs() []string { args = append(args, []string{"--caCert", r.restore.Spec.Backend.Options.CACert}...) } if r.restore.Spec.Backend.Options.ClientCert != "" && r.restore.Spec.Backend.Options.ClientKey != "" { - args = append( - args, - []string{ - "--clientCert", - r.restore.Spec.Backend.Options.ClientCert, - "--clientKey", - r.restore.Spec.Backend.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "--clientCert", + r.restore.Spec.Backend.Options.ClientCert, + "--clientKey", + r.restore.Spec.Backend.Options.ClientKey, + } + args = append(args, addMoreArgs...) } } if r.restore.Spec.RestoreMethod != nil && r.restore.Spec.RestoreMethod.Options != nil { if r.restore.Spec.RestoreMethod.Options.CACert != "" { - args = append( - args, - []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}..., - ) + args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}...) } if r.restore.Spec.RestoreMethod.Options.ClientCert != "" && r.restore.Spec.RestoreMethod.Options.ClientKey != "" { - args = append( - args, - []string{ - "--restoreClientCert", - r.restore.Spec.RestoreMethod.Options.ClientCert, - "--restoreClientKey", - r.restore.Spec.RestoreMethod.Options.ClientKey, - }..., - ) + addMoreArgs := []string{ + "--restoreClientCert", + r.restore.Spec.RestoreMethod.Options.ClientCert, + "--restoreClientKey", + r.restore.Spec.RestoreMethod.Options.ClientKey, + } + args = append(args, addMoreArgs...) 
} } @@ -281,12 +245,11 @@ func (r *RestoreExecutor) attachMoreVolumes() []corev1.Volume { continue } - moreVolumes = append( - moreVolumes, corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - }, - ) + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) } return moreVolumes @@ -319,8 +282,11 @@ func (r *RestoreExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { } } - ku8pVolumeMount := corev1.VolumeMount{Name: _dataDirName, MountPath: cfg.Config.PodVarDir} - volumeMount = append(volumeMount, ku8pVolumeMount) + addVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: cfg.Config.PodVarDir, + } + volumeMount = append(volumeMount, addVolumeMount) return volumeMount } diff --git a/operator/restorecontroller/executor_test.go b/operator/restorecontroller/executor_test.go index 454735886..6be32d384 100644 --- a/operator/restorecontroller/executor_test.go +++ b/operator/restorecontroller/executor_test.go @@ -4,12 +4,11 @@ import ( "context" "testing" + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" + "github.com/k8up-io/k8up/v2/operator/job" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" - - k8upv1 "github.com/k8up-io/k8up/v2/api/v1" - "github.com/k8up-io/k8up/v2/operator/job" ) type PVCExpectation struct { @@ -132,17 +131,15 @@ func TestRestore_setupEnvVars(t *testing.T) { } for name, tt := range tests { - t.Run( - name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - envVars := e.setupEnvVars(context.TODO(), tt.GivenResource) + t.Run(name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + envVars := e.setupEnvVars(context.TODO(), tt.GivenResource) - actualEnvVars, actualSecretKeyRefs := extractVarsAndSecretRefs(envVars) + actualEnvVars, actualSecretKeyRefs := extractVarsAndSecretRefs(envVars) - assert.Equal(t, actualEnvVars, tt.ExpectedEnvVars) - assert.Equal(t, actualSecretKeyRefs, 
tt.ExpectedSecretKeyRefs) - }, - ) + assert.Equal(t, actualEnvVars, tt.ExpectedEnvVars) + assert.Equal(t, actualSecretKeyRefs, tt.ExpectedSecretKeyRefs) + }) } } @@ -186,15 +183,13 @@ func TestRestore_volumeConfig(t *testing.T) { } for name, tt := range tests { - t.Run( - name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - volumes, mounts := e.volumeConfig(tt.GivenResource) + t.Run(name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + volumes, mounts := e.volumeConfig(tt.GivenResource) - assertVolumes(t, tt.ExpectedPVC, volumes) - assertVolumeMounts(t, tt.ExpectedVolumeMount, mounts) - }, - ) + assertVolumes(t, tt.ExpectedPVC, volumes) + assertVolumeMounts(t, tt.ExpectedVolumeMount, mounts) + }) } } @@ -252,14 +247,12 @@ func TestRestore_args(t *testing.T) { } for name, tt := range tests { - t.Run( - name, func(t *testing.T) { - e := NewRestoreExecutor(*newConfig()) - args, err := e.setupArgs(tt.GivenResource) + t.Run(name, func(t *testing.T) { + e := NewRestoreExecutor(*newConfig()) + args, err := e.setupArgs(tt.GivenResource) - require.NoError(t, err) - assert.Equal(t, tt.ExpectedArgs, args) - }, - ) + require.NoError(t, err) + assert.Equal(t, tt.ExpectedArgs, args) + }) } } diff --git a/restic/cli/restic.go b/restic/cli/restic.go index 12aa8bf26..c65770ad7 100644 --- a/restic/cli/restic.go +++ b/restic/cli/restic.go @@ -2,13 +2,13 @@ package cli import ( "context" - "github.com/k8up-io/k8up/v2/operator/utils" "path" "path/filepath" "strings" "github.com/go-logr/logr" + "github.com/k8up-io/k8up/v2/operator/utils" "github.com/k8up-io/k8up/v2/restic/cfg" "github.com/k8up-io/k8up/v2/restic/dto" ) diff --git a/restic/s3/client.go b/restic/s3/client.go index d83a86509..b65ffa1d3 100644 --- a/restic/s3/client.go +++ b/restic/s3/client.go @@ -5,13 +5,14 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "io" "net/http" "net/url" "os" "strings" + + 
"github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" ) // Client wraps the minio s3 client From d1319f04bdc84bdbae5345aec69c7e7519ae3e26 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Sat, 6 Apr 2024 13:43:25 +0330 Subject: [PATCH 27/38] [FIX] Fix typo and document's grammers Signed-off-by: Pooya Azarpour --- cmd/operator/main.go | 2 +- docs/modules/ROOT/examples/usage/operator.txt | 2 +- docs/modules/ROOT/pages/how-tos/backup.adoc | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 186923f63..399568f2b 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -84,7 +84,7 @@ var ( &cli.StringFlag{Destination: &cfg.Config.BackupCheckSchedule, Name: "checkschedule", EnvVars: []string{"BACKUP_CHECKSCHEDULE"}, Value: "0 0 * * 0", Usage: "the default check schedule"}, &cli.StringFlag{Destination: &cfg.Config.OperatorNamespace, Name: "operator-namespace", EnvVars: []string{"BACKUP_OPERATOR_NAMESPACE"}, Required: true, Usage: "set the namespace in which the K8up operator itself runs"}, - &cli.StringFlag{Destination: &cfg.Config.PodVarDir, Name: "vardir", EnvVars: []string{"VAR_DIR"}, Value: "/k8up", Usage: "the var data dir for read/write k8up data or temp file in pod"}, + &cli.StringFlag{Destination: &cfg.Config.PodVarDir, Name: "vardir", EnvVars: []string{"VAR_DIR"}, Value: "/k8up", Usage: "the var data dir for read/write k8up data or temp file in the backup pod"}, }, } ) diff --git a/docs/modules/ROOT/examples/usage/operator.txt b/docs/modules/ROOT/examples/usage/operator.txt index 2cb914614..f43fbc136 100644 --- a/docs/modules/ROOT/examples/usage/operator.txt +++ b/docs/modules/ROOT/examples/usage/operator.txt @@ -46,5 +46,5 @@ OPTIONS: --skip-pvcs-without-annotation skip selecting PVCs that don't have the BACKUP_ANNOTATION (default: disabled) [$BACKUP_SKIP_WITHOUT_ANNOTATION] --checkschedule value the default check schedule (default: "0 0 * * 0") 
[$BACKUP_CHECKSCHEDULE] --operator-namespace value set the namespace in which the K8up operator itself runs [$BACKUP_OPERATOR_NAMESPACE] - --vardir value the var data dir for read/write k8up data or temp file in pod (default: /k8up) [$VAR_DIR] + --vardir value the var data dir for read/write k8up data or temp file in the backup pod (default: /k8up) [$VAR_DIR] --help, -h show help (default: false) diff --git a/docs/modules/ROOT/pages/how-tos/backup.adoc b/docs/modules/ROOT/pages/how-tos/backup.adoc index 7ff013212..e61000d93 100644 --- a/docs/modules/ROOT/pages/how-tos/backup.adoc +++ b/docs/modules/ROOT/pages/how-tos/backup.adoc @@ -15,7 +15,7 @@ TIP: By default, all PVCs are backed up automatically. Adding the annotation `k8 == Self-signed issuer and Mutual TLS -If you are using self-signed issuer or using mutual tls for authenticate client, you be able to using volume for mounting cert files into backup object. +If you are using self-signed issuer or using mutual tls for authenticate client, you're able use a volume for mounting cert files into the backup object. 
=== Self-signed issuer From 9b4216ae321640379d7935922c80cbe5653f3d04 Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 8 Apr 2024 11:01:50 +0330 Subject: [PATCH 28/38] [DELETE] Delete unnecessary error param in setupArgs function Signed-off-by: Pooya Azarpour --- operator/archivecontroller/executor.go | 9 ++++----- operator/backupcontroller/backup_utils.go | 4 ++-- operator/backupcontroller/executor.go | 5 ++--- operator/checkcontroller/executor.go | 9 ++++----- operator/prunecontroller/executor.go | 9 ++++----- 5 files changed, 16 insertions(+), 20 deletions(-) diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index 487c77345..9178dcd58 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -58,10 +58,9 @@ func (a *ArchiveExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() - args, argsErr := a.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + batchJob.Spec.Template.Spec.Containers[0].Args = a.setupArgs() - return argsErr + return nil }) if err != nil { log.Error(err, "could not create job") @@ -77,14 +76,14 @@ func (a *ArchiveExecutor) jobName() string { return k8upv1.ArchiveType.String() + "-" + a.Obj.GetName() } -func (a *ArchiveExecutor) setupArgs() ([]string, error) { +func (a *ArchiveExecutor) setupArgs() []string { args := []string{"-varDir", cfg.Config.PodVarDir, "-archive", "-restoreType", "s3"} if a.archive.Spec.RestoreSpec != nil && len(a.archive.Spec.RestoreSpec.Tags) > 0 { args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) } args = append(args, a.appendOptionsArgs()...) 
- return args, nil + return args } func (a *ArchiveExecutor) setupEnvVars(ctx context.Context, archive *k8upv1.Archive) []corev1.EnvVar { diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index 03828cb72..e15f290b0 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -76,14 +76,14 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err return err } -func (b *BackupExecutor) setupArgs() ([]string, error) { +func (b *BackupExecutor) setupArgs() []string { args := []string{"--varDir", cfg.Config.PodVarDir} if len(b.backup.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) } args = append(args, b.appendOptionsArgs()...) - return args, nil + return args } func (b *BackupExecutor) setupEnvVars() ([]corev1.EnvVar, error) { diff --git a/operator/backupcontroller/executor.go b/operator/backupcontroller/executor.go index ad8a1bb8b..c32ce3fa1 100644 --- a/operator/backupcontroller/executor.go +++ b/operator/backupcontroller/executor.go @@ -271,11 +271,10 @@ func (b *BackupExecutor) startBackup(ctx context.Context) error { b.attachMoreVolumeMounts()..., ) - args, argsErr := b.setupArgs() - batchJob.job.Spec.Template.Spec.Containers[0].Args = args + batchJob.job.Spec.Template.Spec.Containers[0].Args = b.setupArgs() index++ - return argsErr + return nil }) if err != nil { return fmt.Errorf("unable to createOrUpdate(%q): %w", batchJob.job.Name, err) diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index d228d2a8a..afd36545f 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -58,10 +58,9 @@ func (c *CheckExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() batchJob.Labels[job.K8upExclusive] = "true" - args, argsErr := c.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = 
args + batchJob.Spec.Template.Spec.Containers[0].Args = c.setupArgs() - return argsErr + return nil }, ) if err != nil { @@ -76,11 +75,11 @@ func (c *CheckExecutor) jobName() string { return k8upv1.CheckType.String() + "-" + c.check.Name } -func (c *CheckExecutor) setupArgs() ([]string, error) { +func (c *CheckExecutor) setupArgs() []string { args := []string{"-varDir", cfg.Config.PodVarDir, "-check"} args = append(args, c.appendOptionsArgs()...) - return args, nil + return args } func (c *CheckExecutor) setupEnvVars(ctx context.Context) []corev1.EnvVar { diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index 7c71ced5a..504fda47e 100644 --- a/operator/prunecontroller/executor.go +++ b/operator/prunecontroller/executor.go @@ -52,10 +52,9 @@ func (p *PruneExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() batchJob.Labels[job.K8upExclusive] = "true" - args, argsErr := p.setupArgs() - batchJob.Spec.Template.Spec.Containers[0].Args = args + batchJob.Spec.Template.Spec.Containers[0].Args = p.setupArgs() - return argsErr + return nil }) if err != nil { p.SetConditionFalseWithMessage(ctx, k8upv1.ConditionReady, k8upv1.ReasonCreationFailed, "could not create job: %v", err) @@ -70,14 +69,14 @@ func (p *PruneExecutor) jobName() string { return k8upv1.PruneType.String() + "-" + p.prune.Name } -func (p *PruneExecutor) setupArgs() ([]string, error) { +func (p *PruneExecutor) setupArgs() []string { args := []string{"-varDir", cfg.Config.PodVarDir, "-prune"} if len(p.prune.Spec.Retention.Tags) > 0 { args = append(args, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) } args = append(args, p.appendOptionsArgs()...) - return args, nil + return args } // Exclusive should return true for jobs that can't run while other jobs run. 
From 01cb120af2ed261eba84e547e6409e2cb92a0c4c Mon Sep 17 00:00:00 2001 From: Pooya Azarpour Date: Mon, 8 Apr 2024 14:21:24 +0330 Subject: [PATCH 29/38] [CHANGE] Rename options to tlsOptions Signed-off-by: Pooya Azarpour --- api/v1/backend.go | 4 +- api/v1/restore_types.go | 8 +- api/v1/zz_generated.deepcopy.go | 57 +++---- .../v1/k8up.io_archives.yaml | 36 ++--- .../v1/k8up.io_backups.yaml | 18 +-- .../v1/k8up.io_checks.yaml | 18 +-- .../v1/k8up.io_prunes.yaml | 18 +-- .../v1/k8up.io_restores.yaml | 36 ++--- .../v1/k8up.io_schedules.yaml | 144 +++++++++--------- docs/modules/ROOT/pages/how-tos/archive.adoc | 24 +-- docs/modules/ROOT/pages/how-tos/backup.adoc | 4 +- docs/modules/ROOT/pages/how-tos/restore.adoc | 24 +-- .../archive/s3-mtls-archive-mtls.yaml | 4 +- .../archive/s3-mtls-archive-tls.yaml | 4 +- .../archive/s3-tls-archive-mtls.yaml | 4 +- .../archive/s3-tls-archive-tls.yaml | 4 +- e2e/definitions/backup/backup-mtls.yaml | 2 +- e2e/definitions/backup/backup-tls.yaml | 2 +- e2e/definitions/check/check-mtls.yaml | 2 +- e2e/definitions/check/check-tls.yaml | 2 +- e2e/definitions/restore/restore-mtls.yaml | 2 +- e2e/definitions/restore/restore-tls.yaml | 2 +- .../restore/s3-mtls-restore-mtls.yaml | 4 +- .../restore/s3-mtls-restore-tls.yaml | 4 +- .../restore/s3-tls-restore-mtls.yaml | 4 +- .../restore/s3-tls-restore-tls.yaml | 4 +- operator/archivecontroller/executor.go | 28 ++-- operator/backupcontroller/backup_utils.go | 16 +- operator/checkcontroller/executor.go | 16 +- operator/prunecontroller/executor.go | 16 +- operator/restorecontroller/executor.go | 28 ++-- 31 files changed, 259 insertions(+), 280 deletions(-) diff --git a/api/v1/backend.go b/api/v1/backend.go index 27f152642..37d1af45b 100644 --- a/api/v1/backend.go +++ b/api/v1/backend.go @@ -26,7 +26,7 @@ type ( B2 *B2Spec `json:"b2,omitempty"` Rest *RestServerSpec `json:"rest,omitempty"` - Options *BackendOpts `json:"options,omitempty"` + TLSOptions *TLSOptions `json:"tlsOptions,omitempty"` 
VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } @@ -283,7 +283,7 @@ func (in *RestServerSpec) String() string { return fmt.Sprintf("rest:%s://%s:%s@%s", protocol, "$(USER)", "$(PASSWORD)", url) } -type BackendOpts struct { +type TLSOptions struct { CACert string `json:"caCert,omitempty"` ClientCert string `json:"clientCert,omitempty"` ClientKey string `json:"clientKey,omitempty"` diff --git a/api/v1/restore_types.go b/api/v1/restore_types.go index 5830e2b4e..231580ccd 100644 --- a/api/v1/restore_types.go +++ b/api/v1/restore_types.go @@ -37,7 +37,7 @@ type RestoreSpec struct { type RestoreMethod struct { S3 *S3Spec `json:"s3,omitempty"` Folder *FolderRestore `json:"folder,omitempty"` - Options *RestoreOpts `json:"options,omitempty"` + TLSOptions *TLSOptions `json:"tlsOptions,omitempty"` VolumeMounts *[]corev1.VolumeMount `json:"volumeMounts,omitempty"` } @@ -147,9 +147,3 @@ func init() { var ( RestoreKind = reflect.TypeOf(Restore{}).Name() ) - -type RestoreOpts struct { - CACert string `json:"caCert,omitempty"` - ClientCert string `json:"clientCert,omitempty"` - ClientKey string `json:"clientKey,omitempty"` -} diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 3eeab0fa8..547ee7bf5 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -210,9 +210,9 @@ func (in *Backend) DeepCopyInto(out *Backend) { *out = new(RestServerSpec) (*in).DeepCopyInto(*out) } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = new(BackendOpts) + if in.TLSOptions != nil { + in, out := &in.TLSOptions, &out.TLSOptions + *out = new(TLSOptions) **out = **in } if in.VolumeMounts != nil { @@ -238,21 +238,6 @@ func (in *Backend) DeepCopy() *Backend { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BackendOpts) DeepCopyInto(out *BackendOpts) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendOpts. -func (in *BackendOpts) DeepCopy() *BackendOpts { - if in == nil { - return nil - } - out := new(BackendOpts) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Backup) DeepCopyInto(out *Backup) { *out = *in @@ -899,9 +884,9 @@ func (in *RestoreMethod) DeepCopyInto(out *RestoreMethod) { *out = new(FolderRestore) (*in).DeepCopyInto(*out) } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = new(RestoreOpts) + if in.TLSOptions != nil { + in, out := &in.TLSOptions, &out.TLSOptions + *out = new(TLSOptions) **out = **in } if in.VolumeMounts != nil { @@ -927,21 +912,6 @@ func (in *RestoreMethod) DeepCopy() *RestoreMethod { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RestoreOpts) DeepCopyInto(out *RestoreOpts) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreOpts. -func (in *RestoreOpts) DeepCopy() *RestoreOpts { - if in == nil { - return nil - } - out := new(RestoreOpts) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RestoreSchedule) DeepCopyInto(out *RestoreSchedule) { *out = *in @@ -1446,3 +1416,18 @@ func (in *SwiftSpec) DeepCopy() *SwiftSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSOptions) DeepCopyInto(out *TLSOptions) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSOptions. 
+func (in *TLSOptions) DeepCopy() *TLSOptions { + if in == nil { + return nil + } + out := new(TLSOptions) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml index c18e7c277..28e6d7363 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_archives.yaml @@ -252,15 +252,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -386,6 +377,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within @@ -694,15 +694,6 @@ spec: required: - claimName type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object s3: properties: accessKeyIDSecretRef: @@ -752,6 +743,15 @@ spec: type: object x-kubernetes-map-type: atomic type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml index ec94920b1..df28d14a6 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_backups.yaml @@ -259,15 +259,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: 
object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -393,6 +384,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml index 0eeb7c07e..0c0b100ec 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_checks.yaml @@ -254,15 +254,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -388,6 +379,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml index d571c79ae..eed300811 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_prunes.yaml @@ -254,15 +254,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -388,6 +379,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + 
clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml index 2c3b55099..08de9a030 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_restores.yaml @@ -254,15 +254,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -388,6 +379,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within @@ -696,15 +696,6 @@ spec: required: - claimName type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object s3: properties: accessKeyIDSecretRef: @@ -754,6 +745,15 @@ spec: type: object x-kubernetes-map-type: atomic type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within diff --git a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml index 7214682ed..a0e882b5d 100644 --- a/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml +++ b/config/crd/apiextensions.k8s.io/v1/k8up.io_schedules.yaml @@ -245,15 +245,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object 
repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -379,6 +370,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume @@ -689,15 +689,6 @@ spec: required: - claimName type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object s3: properties: accessKeyIDSecretRef: @@ -747,6 +738,15 @@ spec: type: object x-kubernetes-map-type: atomic type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume @@ -1167,15 +1167,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -1301,6 +1292,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume within @@ -1550,15 +1550,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -1684,6 +1675,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: 
description: VolumeMount describes a mounting of a Volume @@ -2367,15 +2367,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -2501,6 +2492,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume @@ -3359,15 +3359,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -3493,6 +3484,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume @@ -4250,15 +4250,6 @@ spec: mountPath: type: string type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object repoPasswordSecretRef: description: RepoPasswordSecretRef references a secret key to look up the restic repository password @@ -4384,6 +4375,15 @@ spec: path: type: string type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume @@ -4694,15 +4694,6 @@ spec: required: - claimName type: object - options: - properties: - caCert: - type: string - clientCert: - type: string - clientKey: - type: string - type: object s3: properties: 
accessKeyIDSecretRef: @@ -4752,6 +4743,15 @@ spec: type: object x-kubernetes-map-type: atomic type: object + tlsOptions: + properties: + caCert: + type: string + clientCert: + type: string + clientKey: + type: string + type: object volumeMounts: items: description: VolumeMount describes a mounting of a Volume diff --git a/docs/modules/ROOT/pages/how-tos/archive.adoc b/docs/modules/ROOT/pages/how-tos/archive.adoc index b9f851b16..34d57f6ae 100644 --- a/docs/modules/ROOT/pages/how-tos/archive.adoc +++ b/docs/modules/ROOT/pages/how-tos/archive.adoc @@ -31,7 +31,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -108,7 +108,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -179,7 +179,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -188,7 +188,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt podSecurityContext: @@ -257,7 +257,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -266,7 +266,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/custom-ca/ca.crt volumeMounts: - name: custom-ca-tls @@ -352,7 +352,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -433,7 +433,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -508,7 +508,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -519,7 +519,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -594,7 +594,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: 
/mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -605,7 +605,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/custom-tls/ca.crt clientCert: /mnt/custom-tls/tls.crt clientKey: /mnt/custom-tls/tls.key diff --git a/docs/modules/ROOT/pages/how-tos/backup.adoc b/docs/modules/ROOT/pages/how-tos/backup.adoc index e61000d93..9002d9a45 100644 --- a/docs/modules/ROOT/pages/how-tos/backup.adoc +++ b/docs/modules/ROOT/pages/how-tos/backup.adoc @@ -33,7 +33,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -103,7 +103,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/docs/modules/ROOT/pages/how-tos/restore.adoc b/docs/modules/ROOT/pages/how-tos/restore.adoc index a4a24dde2..77165b020 100644 --- a/docs/modules/ROOT/pages/how-tos/restore.adoc +++ b/docs/modules/ROOT/pages/how-tos/restore.adoc @@ -360,7 +360,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -439,7 +439,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -512,7 +512,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -521,7 +521,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt podSecurityContext: @@ -592,7 +592,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt volumeMounts: - name: ca-tls @@ -601,7 +601,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/custom-ca/ca.crt volumeMounts: - name: custom-ca-tls @@ -689,7 +689,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/ca/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -772,7 +772,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: 
/mnt/tls/tls.key @@ -849,7 +849,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -860,7 +860,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -937,7 +937,7 @@ spec: backend: s3: {} - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -948,7 +948,7 @@ spec: restoreMethod: s3: {} - options: + tlsOptions: caCert: /mnt/custom-tls/ca.crt clientCert: /mnt/custom-tls/tls.crt clientKey: /mnt/custom-tls/tls.key diff --git a/e2e/definitions/archive/s3-mtls-archive-mtls.yaml b/e2e/definitions/archive/s3-mtls-archive-mtls.yaml index 4ef5d7d28..ae8875fee 100644 --- a/e2e/definitions/archive/s3-mtls-archive-mtls.yaml +++ b/e2e/definitions/archive/s3-mtls-archive-mtls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -24,7 +24,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/archive/s3-mtls-archive-tls.yaml b/e2e/definitions/archive/s3-mtls-archive-tls.yaml index 7f0fd19cb..0c863a9b5 100644 --- a/e2e/definitions/archive/s3-mtls-archive-tls.yaml +++ b/e2e/definitions/archive/s3-mtls-archive-tls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -27,7 +27,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/archive/s3-tls-archive-mtls.yaml 
b/e2e/definitions/archive/s3-tls-archive-mtls.yaml index 5520a5cb8..ee4329bd7 100644 --- a/e2e/definitions/archive/s3-tls-archive-mtls.yaml +++ b/e2e/definitions/archive/s3-tls-archive-mtls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local @@ -25,7 +25,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/archive/s3-tls-archive-tls.yaml b/e2e/definitions/archive/s3-tls-archive-tls.yaml index 5c2930019..2e78eae25 100644 --- a/e2e/definitions/archive/s3-tls-archive-tls.yaml +++ b/e2e/definitions/archive/s3-tls-archive-tls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local @@ -22,7 +22,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/backup/backup-mtls.yaml b/e2e/definitions/backup/backup-mtls.yaml index e949f877a..d369b0b76 100644 --- a/e2e/definitions/backup/backup-mtls.yaml +++ b/e2e/definitions/backup/backup-mtls.yaml @@ -10,7 +10,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/backup/backup-tls.yaml b/e2e/definitions/backup/backup-tls.yaml index 9487dbed2..10e2245ea 100644 --- a/e2e/definitions/backup/backup-tls.yaml +++ b/e2e/definitions/backup/backup-tls.yaml @@ -10,7 +10,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: 
https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/check/check-mtls.yaml b/e2e/definitions/check/check-mtls.yaml index a8313251a..0836d2a14 100644 --- a/e2e/definitions/check/check-mtls.yaml +++ b/e2e/definitions/check/check-mtls.yaml @@ -10,7 +10,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/check/check-tls.yaml b/e2e/definitions/check/check-tls.yaml index 2e27ec3e1..720469999 100644 --- a/e2e/definitions/check/check-tls.yaml +++ b/e2e/definitions/check/check-tls.yaml @@ -10,7 +10,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/restore/restore-mtls.yaml b/e2e/definitions/restore/restore-mtls.yaml index 6f89631a5..522b5debe 100644 --- a/e2e/definitions/restore/restore-mtls.yaml +++ b/e2e/definitions/restore/restore-mtls.yaml @@ -13,7 +13,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/restore/restore-tls.yaml b/e2e/definitions/restore/restore-tls.yaml index c19824413..8e174872c 100644 --- a/e2e/definitions/restore/restore-tls.yaml +++ b/e2e/definitions/restore/restore-tls.yaml @@ -13,7 +13,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/restore/s3-mtls-restore-mtls.yaml b/e2e/definitions/restore/s3-mtls-restore-mtls.yaml index 3c3afe4ec..d84566f11 100644 --- a/e2e/definitions/restore/s3-mtls-restore-mtls.yaml +++ b/e2e/definitions/restore/s3-mtls-restore-mtls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 
successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -24,7 +24,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/restore/s3-mtls-restore-tls.yaml b/e2e/definitions/restore/s3-mtls-restore-tls.yaml index e5800187c..86928791f 100644 --- a/e2e/definitions/restore/s3-mtls-restore-tls.yaml +++ b/e2e/definitions/restore/s3-mtls-restore-tls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key @@ -27,7 +27,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/e2e/definitions/restore/s3-tls-restore-mtls.yaml b/e2e/definitions/restore/s3-tls-restore-mtls.yaml index 368eaffd9..62799e559 100644 --- a/e2e/definitions/restore/s3-tls-restore-mtls.yaml +++ b/e2e/definitions/restore/s3-tls-restore-mtls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local @@ -25,7 +25,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/tls/ca.crt clientCert: /mnt/tls/tls.crt clientKey: /mnt/tls/tls.key diff --git a/e2e/definitions/restore/s3-tls-restore-tls.yaml b/e2e/definitions/restore/s3-tls-restore-tls.yaml index 71bc61223..83e53092f 100644 --- a/e2e/definitions/restore/s3-tls-restore-tls.yaml +++ b/e2e/definitions/restore/s3-tls-restore-tls.yaml @@ -7,7 +7,7 @@ spec: failedJobsHistoryLimit: 1 successfulJobsHistoryLimit: 1 restoreMethod: - options: + tlsOptions: 
caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local @@ -22,7 +22,7 @@ spec: repoPasswordSecretRef: name: backup-repo key: password - options: + tlsOptions: caCert: /mnt/ca/ca.crt s3: endpoint: https://minio-tls.minio-e2e.svc.cluster.local diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index 9178dcd58..af285d5f9 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -81,7 +81,7 @@ func (a *ArchiveExecutor) setupArgs() []string { if a.archive.Spec.RestoreSpec != nil && len(a.archive.Spec.RestoreSpec.Tags) > 0 { args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) } - args = append(args, a.appendOptionsArgs()...) + args = append(args, a.appendTLSOptionsArgs()...) return args } @@ -126,34 +126,34 @@ func (a *ArchiveExecutor) cleanupOldArchives(ctx context.Context, archive *k8upv a.CleanupOldResources(ctx, &k8upv1.ArchiveList{}, archive.Namespace, archive) } -func (a *ArchiveExecutor) appendOptionsArgs() []string { +func (a *ArchiveExecutor) appendTLSOptionsArgs() []string { var args []string - if a.archive.Spec.Backend != nil && a.archive.Spec.Backend.Options != nil { - if a.archive.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"-caCert", a.archive.Spec.Backend.Options.CACert}...) + if a.archive.Spec.Backend != nil && a.archive.Spec.Backend.TLSOptions != nil { + if a.archive.Spec.Backend.TLSOptions.CACert != "" { + args = append(args, []string{"-caCert", a.archive.Spec.Backend.TLSOptions.CACert}...) 
} - if a.archive.Spec.Backend.Options.ClientCert != "" && a.archive.Spec.Backend.Options.ClientKey != "" { + if a.archive.Spec.Backend.TLSOptions.ClientCert != "" && a.archive.Spec.Backend.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "-clientCert", - a.archive.Spec.Backend.Options.ClientCert, + a.archive.Spec.Backend.TLSOptions.ClientCert, "-clientKey", - a.archive.Spec.Backend.Options.ClientKey, + a.archive.Spec.Backend.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } } - if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreMethod.Options != nil { - if a.archive.Spec.RestoreMethod.Options.CACert != "" { - args = append(args, []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.Options.CACert}...) + if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreMethod.TLSOptions != nil { + if a.archive.Spec.RestoreMethod.TLSOptions.CACert != "" { + args = append(args, []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.TLSOptions.CACert}...) } - if a.archive.Spec.RestoreMethod.Options.ClientCert != "" && a.archive.Spec.RestoreMethod.Options.ClientKey != "" { + if a.archive.Spec.RestoreMethod.TLSOptions.ClientCert != "" && a.archive.Spec.RestoreMethod.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "-restoreClientCert", - a.archive.Spec.RestoreMethod.Options.ClientCert, + a.archive.Spec.RestoreMethod.TLSOptions.ClientCert, "-restoreClientKey", - a.archive.Spec.RestoreMethod.Options.ClientKey, + a.archive.Spec.RestoreMethod.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index e15f290b0..35171b199 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -81,7 +81,7 @@ func (b *BackupExecutor) setupArgs() []string { if len(b.backup.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) } - args = append(args, b.appendOptionsArgs()...) 
+ args = append(args, b.appendTLSOptionsArgs()...) return args } @@ -162,22 +162,22 @@ func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { return volumeMount } -func (b *BackupExecutor) appendOptionsArgs() []string { +func (b *BackupExecutor) appendTLSOptionsArgs() []string { var args []string - if !(b.backup.Spec.Backend != nil && b.backup.Spec.Backend.Options != nil) { + if !(b.backup.Spec.Backend != nil && b.backup.Spec.Backend.TLSOptions != nil) { return args } - if b.backup.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"-caCert", b.backup.Spec.Backend.Options.CACert}...) + if b.backup.Spec.Backend.TLSOptions.CACert != "" { + args = append(args, []string{"-caCert", b.backup.Spec.Backend.TLSOptions.CACert}...) } - if b.backup.Spec.Backend.Options.ClientCert != "" && b.backup.Spec.Backend.Options.ClientKey != "" { + if b.backup.Spec.Backend.TLSOptions.ClientCert != "" && b.backup.Spec.Backend.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "-clientCert", - b.backup.Spec.Backend.Options.ClientCert, + b.backup.Spec.Backend.TLSOptions.ClientCert, "-clientKey", - b.backup.Spec.Backend.Options.ClientKey, + b.backup.Spec.Backend.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index afd36545f..4d5a9a703 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -77,7 +77,7 @@ func (c *CheckExecutor) jobName() string { func (c *CheckExecutor) setupArgs() []string { args := []string{"-varDir", cfg.Config.PodVarDir, "-check"} - args = append(args, c.appendOptionsArgs()...) + args = append(args, c.appendTLSOptionsArgs()...) 
return args } @@ -109,21 +109,21 @@ func (c *CheckExecutor) cleanupOldChecks(ctx context.Context, check *k8upv1.Chec c.CleanupOldResources(ctx, &k8upv1.CheckList{}, check.Namespace, check) } -func (c *CheckExecutor) appendOptionsArgs() []string { +func (c *CheckExecutor) appendTLSOptionsArgs() []string { var args []string - if !(c.check.Spec.Backend != nil && c.check.Spec.Backend.Options != nil) { + if !(c.check.Spec.Backend != nil && c.check.Spec.Backend.TLSOptions != nil) { return args } - if c.check.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"-caCert", c.check.Spec.Backend.Options.CACert}...) + if c.check.Spec.Backend.TLSOptions.CACert != "" { + args = append(args, []string{"-caCert", c.check.Spec.Backend.TLSOptions.CACert}...) } - if c.check.Spec.Backend.Options.ClientCert != "" && c.check.Spec.Backend.Options.ClientKey != "" { + if c.check.Spec.Backend.TLSOptions.ClientCert != "" && c.check.Spec.Backend.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "-clientCert", - c.check.Spec.Backend.Options.ClientCert, + c.check.Spec.Backend.TLSOptions.ClientCert, "-clientKey", - c.check.Spec.Backend.Options.ClientKey, + c.check.Spec.Backend.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index 504fda47e..ccd4c53e5 100644 --- a/operator/prunecontroller/executor.go +++ b/operator/prunecontroller/executor.go @@ -74,7 +74,7 @@ func (p *PruneExecutor) setupArgs() []string { if len(p.prune.Spec.Retention.Tags) > 0 { args = append(args, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) } - args = append(args, p.appendOptionsArgs()...) + args = append(args, p.appendTLSOptionsArgs()...) 
return args } @@ -141,21 +141,21 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ return vars.Convert() } -func (p *PruneExecutor) appendOptionsArgs() []string { +func (p *PruneExecutor) appendTLSOptionsArgs() []string { var args []string - if !(p.prune.Spec.Backend != nil && p.prune.Spec.Backend.Options != nil) { + if !(p.prune.Spec.Backend != nil && p.prune.Spec.Backend.TLSOptions != nil) { return args } - if p.prune.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"-caCert", p.prune.Spec.Backend.Options.CACert}...) + if p.prune.Spec.Backend.TLSOptions.CACert != "" { + args = append(args, []string{"-caCert", p.prune.Spec.Backend.TLSOptions.CACert}...) } - if p.prune.Spec.Backend.Options.ClientCert != "" && p.prune.Spec.Backend.Options.ClientKey != "" { + if p.prune.Spec.Backend.TLSOptions.ClientCert != "" && p.prune.Spec.Backend.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "-clientCert", - p.prune.Spec.Backend.Options.ClientCert, + p.prune.Spec.Backend.TLSOptions.ClientCert, "-clientKey", - p.prune.Spec.Backend.Options.ClientKey, + p.prune.Spec.Backend.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } diff --git a/operator/restorecontroller/executor.go b/operator/restorecontroller/executor.go index 2cd619a2a..4ff464a17 100644 --- a/operator/restorecontroller/executor.go +++ b/operator/restorecontroller/executor.go @@ -119,7 +119,7 @@ func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { return nil, fmt.Errorf("undefined restore method (-restoreType) on '%v/%v'", restore.Namespace, restore.Name) } - args = append(args, r.appendOptionsArgs()...) + args = append(args, r.appendTLSOptionsArgs()...) 
return args, nil } @@ -183,34 +183,34 @@ func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Rest return vars.Convert() } -func (r *RestoreExecutor) appendOptionsArgs() []string { +func (r *RestoreExecutor) appendTLSOptionsArgs() []string { var args []string - if r.restore.Spec.Backend != nil && r.restore.Spec.Backend.Options != nil { - if r.restore.Spec.Backend.Options.CACert != "" { - args = append(args, []string{"--caCert", r.restore.Spec.Backend.Options.CACert}...) + if r.restore.Spec.Backend != nil && r.restore.Spec.Backend.TLSOptions != nil { + if r.restore.Spec.Backend.TLSOptions.CACert != "" { + args = append(args, []string{"--caCert", r.restore.Spec.Backend.TLSOptions.CACert}...) } - if r.restore.Spec.Backend.Options.ClientCert != "" && r.restore.Spec.Backend.Options.ClientKey != "" { + if r.restore.Spec.Backend.TLSOptions.ClientCert != "" && r.restore.Spec.Backend.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "--clientCert", - r.restore.Spec.Backend.Options.ClientCert, + r.restore.Spec.Backend.TLSOptions.ClientCert, "--clientKey", - r.restore.Spec.Backend.Options.ClientKey, + r.restore.Spec.Backend.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } } - if r.restore.Spec.RestoreMethod != nil && r.restore.Spec.RestoreMethod.Options != nil { - if r.restore.Spec.RestoreMethod.Options.CACert != "" { - args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.Options.CACert}...) + if r.restore.Spec.RestoreMethod != nil && r.restore.Spec.RestoreMethod.TLSOptions != nil { + if r.restore.Spec.RestoreMethod.TLSOptions.CACert != "" { + args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.TLSOptions.CACert}...) 
} - if r.restore.Spec.RestoreMethod.Options.ClientCert != "" && r.restore.Spec.RestoreMethod.Options.ClientKey != "" { + if r.restore.Spec.RestoreMethod.TLSOptions.ClientCert != "" && r.restore.Spec.RestoreMethod.TLSOptions.ClientKey != "" { addMoreArgs := []string{ "--restoreClientCert", - r.restore.Spec.RestoreMethod.Options.ClientCert, + r.restore.Spec.RestoreMethod.TLSOptions.ClientCert, "--restoreClientKey", - r.restore.Spec.RestoreMethod.Options.ClientKey, + r.restore.Spec.RestoreMethod.TLSOptions.ClientKey, } args = append(args, addMoreArgs...) } From b59589a30428a9b1f7958dbc61bb962c23f59cac Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 00:30:11 +0330 Subject: [PATCH 30/38] [UPDATE] Update documents because of changing options to tlsOptions Signed-off-by: poyaz --- docs/modules/ROOT/examples/usage/operator.txt | 4 +- docs/modules/ROOT/examples/usage/restic.txt | 2 +- .../ROOT/pages/references/api-reference.adoc | 62 +++++++------------ 3 files changed, 25 insertions(+), 43 deletions(-) diff --git a/docs/modules/ROOT/examples/usage/operator.txt b/docs/modules/ROOT/examples/usage/operator.txt index f43fbc136..98e2be159 100644 --- a/docs/modules/ROOT/examples/usage/operator.txt +++ b/docs/modules/ROOT/examples/usage/operator.txt @@ -18,7 +18,7 @@ OPTIONS: --global-concurrent-check-jobs-limit value set the limit of concurrent check jobs (default: unlimited) [$BACKUP_GLOBAL_CONCURRENT_CHECK_JOBS_LIMIT] --global-concurrent-prune-jobs-limit value set the limit of concurrent prune jobs (default: unlimited) [$BACKUP_GLOBAL_CONCURRENT_PRUNE_JOBS_LIMIT] --global-concurrent-restore-jobs-limit value set the limit of concurrent restore jobs (default: unlimited) [$BACKUP_GLOBAL_CONCURRENT_RESTORE_JOBS_LIMIT] - --globalrestores3accesskeyid value set the global restore S3 accessKeyID for restores [$BACKUP_GLOBALRESTORES3ACCESKEYID] / [$BACKUP_GLOBALRESTORES3ACCESSKEYID] + --globalrestores3accesskeyid value set the global restore S3 accessKeyID for restores 
[$BACKUP_GLOBALRESTORES3ACCESKEYID, $BACKUP_GLOBALRESTORES3ACCESSKEYID] --globalrestores3bucket value set the global restore S3 bucket for restores [$BACKUP_GLOBALRESTORES3BUCKET] --globalrestores3endpoint value set the global restore S3 endpoint for the restores (needs the scheme 'http' or 'https') [$BACKUP_GLOBALRESTORES3ENDPOINT] --globalrestores3secretaccesskey value set the global restore S3 SecretAccessKey for restores [$BACKUP_GLOBALRESTORES3SECRETACCESSKEY] @@ -46,5 +46,5 @@ OPTIONS: --skip-pvcs-without-annotation skip selecting PVCs that don't have the BACKUP_ANNOTATION (default: disabled) [$BACKUP_SKIP_WITHOUT_ANNOTATION] --checkschedule value the default check schedule (default: "0 0 * * 0") [$BACKUP_CHECKSCHEDULE] --operator-namespace value set the namespace in which the K8up operator itself runs [$BACKUP_OPERATOR_NAMESPACE] - --vardir value the var data dir for read/write k8up data or temp file in the backup pod (default: /k8up) [$VAR_DIR] + --vardir value the var data dir for read/write k8up data or temp file in the backup pod (default: "/k8up") [$VAR_DIR] --help, -h show help (default: false) diff --git a/docs/modules/ROOT/examples/usage/restic.txt b/docs/modules/ROOT/examples/usage/restic.txt index 6127aff2d..252c7bed4 100644 --- a/docs/modules/ROOT/examples/usage/restic.txt +++ b/docs/modules/ROOT/examples/usage/restic.txt @@ -50,7 +50,7 @@ OPTIONS: --keepWithin value While pruning, keep tagged snapshots within the given duration, e.g. 
'2y5m7d3h' [$KEEP_WITHIN] --targetPods value [ --targetPods value ] Filter list of pods by TARGET_PODS names [$TARGET_PODS] --sleepDuration value Sleep for specified amount until init starts (default: 0s) [$SLEEP_DURATION] - --varDir value The var directory is stored k8up metadata files and temporary files (default: /k8up) + --varDir value The var directory is stored k8up metadata files and temporary files (default: "/k8up") --caCert value The certificate authority file path [$CA_CERT_FILE] --clientCert value The client certificate file path [$CLIENT_CERT_FILE] --clientKey value The client private key file path [$CLIENT_KEY_FILE] diff --git a/docs/modules/ROOT/pages/references/api-reference.adoc b/docs/modules/ROOT/pages/references/api-reference.adoc index 6abdf7c30..233a24572 100644 --- a/docs/modules/ROOT/pages/references/api-reference.adoc +++ b/docs/modules/ROOT/pages/references/api-reference.adoc @@ -173,32 +173,13 @@ It is expected that users only configure one storage type. | *`swift`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-swiftspec[$$SwiftSpec$$]__ | | *`b2`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-b2spec[$$B2Spec$$]__ | | *`rest`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restserverspec[$$RestServerSpec$$]__ | -| *`options`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backendopts[$$BackendOpts$$]__ | +| *`tlsOptions`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-tlsoptions[$$TLSOptions$$]__ | | *`volumeMounts`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#volumemount-v1-core[$$VolumeMount$$]__ | |=== -[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backendopts"] -=== BackendOpts - - - -.Appears In: -**** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] -**** - -[cols="25a,75a", options="header"] -|=== -| Field | Description -| *`caCert`* __string__ | -| *`clientCert`* __string__ | -| *`clientKey`* __string__ | 
-|=== - - [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backup"] === Backup @@ -763,30 +744,11 @@ all the settings are mutual exclusive. | Field | Description | *`s3`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-s3spec[$$S3Spec$$]__ | | *`folder`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-folderrestore[$$FolderRestore$$]__ | -| *`options`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreopts[$$RestoreOpts$$]__ | +| *`tlsOptions`* __xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-tlsoptions[$$TLSOptions$$]__ | | *`volumeMounts`* __link:https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#volumemount-v1-core[$$VolumeMount$$]__ | |=== -[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreopts"] -=== RestoreOpts - - - -.Appears In: -**** -- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$] -**** - -[cols="25a,75a", options="header"] -|=== -| Field | Description -| *`caCert`* __string__ | -| *`clientCert`* __string__ | -| *`clientKey`* __string__ | -|=== - - [id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoreschedule"] === RestoreSchedule @@ -1161,3 +1123,23 @@ can be restored. 
|=== +[id="{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-tlsoptions"] +=== TLSOptions + + + +.Appears In: +**** +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-backend[$$Backend$$] +- xref:{anchor_prefix}-github-com-k8up-io-k8up-v2-api-v1-restoremethod[$$RestoreMethod$$] +**** + +[cols="25a,75a", options="header"] +|=== +| Field | Description +| *`caCert`* __string__ | +| *`clientCert`* __string__ | +| *`clientKey`* __string__ | +|=== + + From 0acef98cd79373ef5c3aafb7d392066d05eefda8 Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 00:32:34 +0330 Subject: [PATCH 31/38] [UPDATE] Refactoring code for duplciate fucntions in operators These functions is created in utils: - AppendTLSOptionsArgs: for generate env for backend and restore specs - AttachTLSVolumes: for create volumes for pods AttachTLSVolumeMounts: for create volumeMount for backend and restore specs Signed-off-by: poyaz --- operator/archivecontroller/executor.go | 124 +++----------------- operator/backupcontroller/backup_utils.go | 81 +------------ operator/backupcontroller/executor.go | 17 ++- operator/checkcontroller/executor.go | 83 ++------------ operator/prunecontroller/executor.go | 83 ++------------ operator/restorecontroller/executor.go | 131 +++------------------- operator/utils/utils.go | 118 +++++++++++++++++++ 7 files changed, 184 insertions(+), 453 deletions(-) diff --git a/operator/archivecontroller/executor.go b/operator/archivecontroller/executor.go index af285d5f9..9e594443d 100644 --- a/operator/archivecontroller/executor.go +++ b/operator/archivecontroller/executor.go @@ -2,7 +2,6 @@ package archivecontroller import ( "context" - "github.com/k8up-io/k8up/v2/operator/executor" "github.com/k8up-io/k8up/v2/operator/utils" batchv1 "k8s.io/api/batch/v1" @@ -16,8 +15,8 @@ import ( ) const ( - archivePath = "/archive" - _dataDirName = "k8up-dir" + archivePath = "/archive" + certPrefixName = "restore" ) // ArchiveExecutor will execute the batch.job for archive. 
@@ -55,8 +54,8 @@ func (a *ArchiveExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].Env = a.setupEnvVars(ctx, a.archive) a.archive.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = a.attachMoreVolumes() + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = a.attachTLSVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = utils.AttachTLSVolumes(a.archive.Spec.Volumes) batchJob.Spec.Template.Spec.Containers[0].Args = a.setupArgs() @@ -81,7 +80,12 @@ func (a *ArchiveExecutor) setupArgs() []string { if a.archive.Spec.RestoreSpec != nil && len(a.archive.Spec.RestoreSpec.Tags) > 0 { args = append(args, executor.BuildTagArgs(a.archive.Spec.RestoreSpec.Tags)...) } - args = append(args, a.appendTLSOptionsArgs()...) + if a.archive.Spec.Backend != nil { + args = append(args, utils.AppendTLSOptionsArgs(a.archive.Spec.Backend.TLSOptions)...) + } + if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreSpec.RestoreMethod != nil { + args = append(args, utils.AppendTLSOptionsArgs(a.archive.Spec.RestoreSpec.RestoreMethod.TLSOptions, certPrefixName)...) + } return args } @@ -126,110 +130,14 @@ func (a *ArchiveExecutor) cleanupOldArchives(ctx context.Context, archive *k8upv a.CleanupOldResources(ctx, &k8upv1.ArchiveList{}, archive.Namespace, archive) } -func (a *ArchiveExecutor) appendTLSOptionsArgs() []string { - var args []string - - if a.archive.Spec.Backend != nil && a.archive.Spec.Backend.TLSOptions != nil { - if a.archive.Spec.Backend.TLSOptions.CACert != "" { - args = append(args, []string{"-caCert", a.archive.Spec.Backend.TLSOptions.CACert}...) 
- } - if a.archive.Spec.Backend.TLSOptions.ClientCert != "" && a.archive.Spec.Backend.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "-clientCert", - a.archive.Spec.Backend.TLSOptions.ClientCert, - "-clientKey", - a.archive.Spec.Backend.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) - } - } - - if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreMethod.TLSOptions != nil { - if a.archive.Spec.RestoreMethod.TLSOptions.CACert != "" { - args = append(args, []string{"-restoreCaCert", a.archive.Spec.RestoreMethod.TLSOptions.CACert}...) - } - if a.archive.Spec.RestoreMethod.TLSOptions.ClientCert != "" && a.archive.Spec.RestoreMethod.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "-restoreClientCert", - a.archive.Spec.RestoreMethod.TLSOptions.ClientCert, - "-restoreClientKey", - a.archive.Spec.RestoreMethod.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) - } - } - - return args -} - -func (a *ArchiveExecutor) attachMoreVolumes() []corev1.Volume { - ku8pVolume := corev1.Volume{ - Name: _dataDirName, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - } - - if utils.ZeroLen(a.archive.Spec.Volumes) { - return []corev1.Volume{ku8pVolume} - } - - moreVolumes := make([]corev1.Volume, 0, len(*a.archive.Spec.Volumes)+1) - moreVolumes = append(moreVolumes, ku8pVolume) - for _, v := range *a.archive.Spec.Volumes { - vol := v - - var volumeSource corev1.VolumeSource - if vol.PersistentVolumeClaim != nil { - volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim - } else if vol.Secret != nil { - volumeSource.Secret = vol.Secret - } else if vol.ConfigMap != nil { - volumeSource.ConfigMap = vol.ConfigMap - } else { - continue - } - - addVolume := corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - } - moreVolumes = append(moreVolumes, addVolume) - } - - return moreVolumes -} - -func (a *ArchiveExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { - var volumeMount 
[]corev1.VolumeMount - +func (a *ArchiveExecutor) attachTLSVolumeMounts() []corev1.VolumeMount { + var tlsVolumeMounts []corev1.VolumeMount if a.archive.Spec.Backend != nil && !utils.ZeroLen(a.archive.Spec.Backend.VolumeMounts) { - volumeMount = append(volumeMount, *a.archive.Spec.Backend.VolumeMounts...) + tlsVolumeMounts = append(tlsVolumeMounts, *a.archive.Spec.Backend.VolumeMounts...) } - if a.archive.Spec.RestoreMethod != nil && !utils.ZeroLen(a.archive.Spec.RestoreMethod.VolumeMounts) { - for _, v1 := range *a.archive.Spec.RestoreMethod.VolumeMounts { - vm1 := v1 - var isExist bool - - for _, v2 := range volumeMount { - vm2 := v2 - if vm1.Name == vm2.Name && vm1.MountPath == vm2.MountPath { - isExist = true - break - } - } - - if isExist { - continue - } - - volumeMount = append(volumeMount, vm1) - } - } - - addVolumeMount := corev1.VolumeMount{ - Name: _dataDirName, - MountPath: cfg.Config.PodVarDir, + if a.archive.Spec.RestoreSpec != nil && a.archive.Spec.RestoreSpec.RestoreMethod != nil && !utils.ZeroLen(a.archive.Spec.RestoreSpec.RestoreMethod.VolumeMounts) { + tlsVolumeMounts = append(tlsVolumeMounts, *a.archive.Spec.RestoreSpec.RestoreMethod.VolumeMounts...) 
} - volumeMount = append(volumeMount, addVolumeMount) - return volumeMount + return utils.AttachTLSVolumeMounts(cfg.Config.PodVarDir, &tlsVolumeMounts) } diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index 35171b199..eed9be5cf 100644 --- a/operator/backupcontroller/backup_utils.go +++ b/operator/backupcontroller/backup_utils.go @@ -14,8 +14,6 @@ import ( "github.com/k8up-io/k8up/v2/operator/cfg" ) -const _dataDirName = "k8up-dir" - func (b *BackupExecutor) fetchPVCs(ctx context.Context, list client.ObjectList) error { return b.Config.Client.List(ctx, list, client.InNamespace(b.backup.Namespace)) } @@ -81,7 +79,9 @@ func (b *BackupExecutor) setupArgs() []string { if len(b.backup.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) } - args = append(args, b.appendTLSOptionsArgs()...) + if b.backup.Spec.Backend != nil { + args = append(args, utils.AppendTLSOptionsArgs(b.backup.Spec.Backend.TLSOptions)...) + } return args } @@ -109,78 +109,3 @@ func (b *BackupExecutor) setupEnvVars() ([]corev1.EnvVar, error) { } return vars.Convert(), nil } - -func (b *BackupExecutor) attachMoreVolumes() []corev1.Volume { - ku8pVolume := corev1.Volume{ - Name: _dataDirName, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - } - - if utils.ZeroLen(b.backup.Spec.Volumes) { - return []corev1.Volume{ku8pVolume} - } - - moreVolumes := make([]corev1.Volume, 0, len(*b.backup.Spec.Volumes)+1) - moreVolumes = append(moreVolumes, ku8pVolume) - for _, v := range *b.backup.Spec.Volumes { - vol := v - - var volumeSource corev1.VolumeSource - if vol.PersistentVolumeClaim != nil { - volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim - } else if vol.Secret != nil { - volumeSource.Secret = vol.Secret - } else if vol.ConfigMap != nil { - volumeSource.ConfigMap = vol.ConfigMap - } else { - continue - } - - addVolume := corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, 
- } - moreVolumes = append(moreVolumes, addVolume) - } - - return moreVolumes -} - -func (b *BackupExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { - var volumeMount []corev1.VolumeMount - - if b.backup.Spec.Backend != nil && !utils.ZeroLen(b.backup.Spec.Backend.VolumeMounts) { - volumeMount = *b.backup.Spec.Backend.VolumeMounts - } - - addVolumeMount := corev1.VolumeMount{ - Name: _dataDirName, - MountPath: cfg.Config.PodVarDir, - } - volumeMount = append(volumeMount, addVolumeMount) - - return volumeMount -} - -func (b *BackupExecutor) appendTLSOptionsArgs() []string { - var args []string - - if !(b.backup.Spec.Backend != nil && b.backup.Spec.Backend.TLSOptions != nil) { - return args - } - - if b.backup.Spec.Backend.TLSOptions.CACert != "" { - args = append(args, []string{"-caCert", b.backup.Spec.Backend.TLSOptions.CACert}...) - } - if b.backup.Spec.Backend.TLSOptions.ClientCert != "" && b.backup.Spec.Backend.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "-clientCert", - b.backup.Spec.Backend.TLSOptions.ClientCert, - "-clientKey", - b.backup.Spec.Backend.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) - } - - return args -} diff --git a/operator/backupcontroller/executor.go b/operator/backupcontroller/executor.go index c32ce3fa1..e33f68521 100644 --- a/operator/backupcontroller/executor.go +++ b/operator/backupcontroller/executor.go @@ -3,6 +3,7 @@ package backupcontroller import ( "context" "fmt" + "github.com/k8up-io/k8up/v2/operator/utils" "strconv" "strings" "time" @@ -265,11 +266,8 @@ func (b *BackupExecutor) startBackup(ctx context.Context) error { } b.backup.Spec.AppendEnvFromToContainer(&batchJob.job.Spec.Template.Spec.Containers[0]) batchJob.job.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount - batchJob.job.Spec.Template.Spec.Volumes = append(batchJob.volumes, b.attachMoreVolumes()...) 
- batchJob.job.Spec.Template.Spec.Containers[0].VolumeMounts = append( - b.newVolumeMounts(batchJob.volumes), - b.attachMoreVolumeMounts()..., - ) + batchJob.job.Spec.Template.Spec.Volumes = append(batchJob.volumes, utils.AttachTLSVolumes(b.backup.Spec.Volumes)...) + batchJob.job.Spec.Template.Spec.Containers[0].VolumeMounts = append(b.newVolumeMounts(batchJob.volumes), b.attachTLSVolumeMounts()...) batchJob.job.Spec.Template.Spec.Containers[0].Args = b.setupArgs() @@ -305,3 +303,12 @@ func (b *BackupExecutor) cleanupOldBackups(ctx context.Context) { func (b *BackupExecutor) jobName(name string) string { return k8upv1.BackupType.String() + "-" + b.backup.Name + "-" + name } + +func (b *BackupExecutor) attachTLSVolumeMounts() []corev1.VolumeMount { + var tlsVolumeMounts []corev1.VolumeMount + if b.backup.Spec.Backend != nil && !utils.ZeroLen(b.backup.Spec.Backend.VolumeMounts) { + tlsVolumeMounts = append(tlsVolumeMounts, *b.backup.Spec.Backend.VolumeMounts...) + } + + return utils.AttachTLSVolumeMounts(cfg.Config.PodVarDir, &tlsVolumeMounts) +} diff --git a/operator/checkcontroller/executor.go b/operator/checkcontroller/executor.go index 4d5a9a703..d5bd1e803 100644 --- a/operator/checkcontroller/executor.go +++ b/operator/checkcontroller/executor.go @@ -14,8 +14,6 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) -const _dataDirName = "k8up-dir" - // CheckExecutor will execute the batch.job for checks. 
type CheckExecutor struct { executor.Generic @@ -54,8 +52,8 @@ func (c *CheckExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].Env = c.setupEnvVars(ctx) c.check.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = c.attachMoreVolumes() + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = c.attachTLSVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = utils.AttachTLSVolumes(c.check.Spec.Volumes) batchJob.Labels[job.K8upExclusive] = "true" batchJob.Spec.Template.Spec.Containers[0].Args = c.setupArgs() @@ -77,7 +75,9 @@ func (c *CheckExecutor) jobName() string { func (c *CheckExecutor) setupArgs() []string { args := []string{"-varDir", cfg.Config.PodVarDir, "-check"} - args = append(args, c.appendTLSOptionsArgs()...) + if c.check.Spec.Backend != nil { + args = append(args, utils.AppendTLSOptionsArgs(c.check.Spec.Backend.TLSOptions)...) + } return args } @@ -109,76 +109,11 @@ func (c *CheckExecutor) cleanupOldChecks(ctx context.Context, check *k8upv1.Chec c.CleanupOldResources(ctx, &k8upv1.CheckList{}, check.Namespace, check) } -func (c *CheckExecutor) appendTLSOptionsArgs() []string { - var args []string - if !(c.check.Spec.Backend != nil && c.check.Spec.Backend.TLSOptions != nil) { - return args - } - - if c.check.Spec.Backend.TLSOptions.CACert != "" { - args = append(args, []string{"-caCert", c.check.Spec.Backend.TLSOptions.CACert}...) - } - if c.check.Spec.Backend.TLSOptions.ClientCert != "" && c.check.Spec.Backend.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "-clientCert", - c.check.Spec.Backend.TLSOptions.ClientCert, - "-clientKey", - c.check.Spec.Backend.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) 
- } - - return args -} - -func (c *CheckExecutor) attachMoreVolumes() []corev1.Volume { - ku8pVolume := corev1.Volume{ - Name: _dataDirName, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - } - - if utils.ZeroLen(c.check.Spec.Volumes) { - return []corev1.Volume{ku8pVolume} - } - - moreVolumes := make([]corev1.Volume, 0, len(*c.check.Spec.Volumes)+1) - moreVolumes = append(moreVolumes, ku8pVolume) - for _, v := range *c.check.Spec.Volumes { - vol := v - - var volumeSource corev1.VolumeSource - if vol.PersistentVolumeClaim != nil { - volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim - } else if vol.Secret != nil { - volumeSource.Secret = vol.Secret - } else if vol.ConfigMap != nil { - volumeSource.ConfigMap = vol.ConfigMap - } else { - continue - } - - addVolume := corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - } - moreVolumes = append(moreVolumes, addVolume) - } - - return moreVolumes -} - -func (c *CheckExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { - var volumeMount []corev1.VolumeMount - +func (c *CheckExecutor) attachTLSVolumeMounts() []corev1.VolumeMount { + var tlsVolumeMounts []corev1.VolumeMount if c.check.Spec.Backend != nil && !utils.ZeroLen(c.check.Spec.Backend.VolumeMounts) { - volumeMount = *c.check.Spec.Backend.VolumeMounts - } - - ku8pVolumeMount := corev1.VolumeMount{ - Name: _dataDirName, - MountPath: cfg.Config.PodVarDir, + tlsVolumeMounts = append(tlsVolumeMounts, *c.check.Spec.Backend.VolumeMounts...) 
} - volumeMount = append(volumeMount, ku8pVolumeMount) - return volumeMount + return utils.AttachTLSVolumeMounts(cfg.Config.PodVarDir, &tlsVolumeMounts) } diff --git a/operator/prunecontroller/executor.go b/operator/prunecontroller/executor.go index ccd4c53e5..ad019fa83 100644 --- a/operator/prunecontroller/executor.go +++ b/operator/prunecontroller/executor.go @@ -17,8 +17,6 @@ import ( "github.com/k8up-io/k8up/v2/operator/job" ) -const _dataDirName = "k8up-dir" - // PruneExecutor will execute the batch.job for Prunes. type PruneExecutor struct { executor.Generic @@ -48,8 +46,8 @@ func (p *PruneExecutor) Execute(ctx context.Context) error { batchJob.Spec.Template.Spec.Containers[0].Env = p.setupEnvVars(ctx, p.prune) batchJob.Spec.Template.Spec.ServiceAccountName = cfg.Config.ServiceAccount p.prune.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachMoreVolumeMounts() - batchJob.Spec.Template.Spec.Volumes = p.attachMoreVolumes() + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = p.attachTLSVolumeMounts() + batchJob.Spec.Template.Spec.Volumes = utils.AttachTLSVolumes(p.prune.Spec.Volumes) batchJob.Labels[job.K8upExclusive] = "true" batchJob.Spec.Template.Spec.Containers[0].Args = p.setupArgs() @@ -74,7 +72,9 @@ func (p *PruneExecutor) setupArgs() []string { if len(p.prune.Spec.Retention.Tags) > 0 { args = append(args, executor.BuildTagArgs(p.prune.Spec.Retention.Tags)...) } - args = append(args, p.appendTLSOptionsArgs()...) + if p.prune.Spec.Backend != nil { + args = append(args, utils.AppendTLSOptionsArgs(p.prune.Spec.Backend.TLSOptions)...) 
+ } return args } @@ -141,76 +141,11 @@ func (p *PruneExecutor) setupEnvVars(ctx context.Context, prune *k8upv1.Prune) [ return vars.Convert() } -func (p *PruneExecutor) appendTLSOptionsArgs() []string { - var args []string - if !(p.prune.Spec.Backend != nil && p.prune.Spec.Backend.TLSOptions != nil) { - return args - } - - if p.prune.Spec.Backend.TLSOptions.CACert != "" { - args = append(args, []string{"-caCert", p.prune.Spec.Backend.TLSOptions.CACert}...) - } - if p.prune.Spec.Backend.TLSOptions.ClientCert != "" && p.prune.Spec.Backend.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "-clientCert", - p.prune.Spec.Backend.TLSOptions.ClientCert, - "-clientKey", - p.prune.Spec.Backend.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) - } - - return args -} - -func (p *PruneExecutor) attachMoreVolumes() []corev1.Volume { - ku8pVolume := corev1.Volume{ - Name: _dataDirName, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - } - - if utils.ZeroLen(p.prune.Spec.Volumes) { - return []corev1.Volume{ku8pVolume} - } - - moreVolumes := make([]corev1.Volume, 0, len(*p.prune.Spec.Volumes)+1) - moreVolumes = append(moreVolumes, ku8pVolume) - for _, v := range *p.prune.Spec.Volumes { - vol := v - - var volumeSource corev1.VolumeSource - if vol.PersistentVolumeClaim != nil { - volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim - } else if vol.Secret != nil { - volumeSource.Secret = vol.Secret - } else if vol.ConfigMap != nil { - volumeSource.ConfigMap = vol.ConfigMap - } else { - continue - } - - addVolume := corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - } - moreVolumes = append(moreVolumes, addVolume) - } - - return moreVolumes -} - -func (p *PruneExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { - var volumeMount []corev1.VolumeMount - +func (p *PruneExecutor) attachTLSVolumeMounts() []corev1.VolumeMount { + var tlsVolumeMounts []corev1.VolumeMount if p.prune.Spec.Backend != nil && 
!utils.ZeroLen(p.prune.Spec.Backend.VolumeMounts) { - volumeMount = *p.prune.Spec.Backend.VolumeMounts - } - - ku8pVolumeMount := corev1.VolumeMount{ - Name: _dataDirName, - MountPath: cfg.Config.PodVarDir, + tlsVolumeMounts = append(tlsVolumeMounts, *p.prune.Spec.Backend.VolumeMounts...) } - volumeMount = append(volumeMount, ku8pVolumeMount) - return volumeMount + return utils.AttachTLSVolumeMounts(cfg.Config.PodVarDir, &tlsVolumeMounts) } diff --git a/operator/restorecontroller/executor.go b/operator/restorecontroller/executor.go index 4ff464a17..d7464b08e 100644 --- a/operator/restorecontroller/executor.go +++ b/operator/restorecontroller/executor.go @@ -18,8 +18,8 @@ import ( ) const ( - restorePath = "/restore" - _dataDirName = "k8up-dir" + restorePath = "/restore" + certPrefixName = "restore" ) type RestoreExecutor struct { @@ -64,10 +64,7 @@ func (r *RestoreExecutor) cleanupOldRestores(ctx context.Context, restore *k8upv r.CleanupOldResources(ctx, &k8upv1.RestoreList{}, restore.Namespace, restore) } -func (r *RestoreExecutor) createRestoreObject( - ctx context.Context, - restore *k8upv1.Restore, -) (*batchv1.Job, error) { +func (r *RestoreExecutor) createRestoreObject(ctx context.Context, restore *k8upv1.Restore) (*batchv1.Job, error) { batchJob := &batchv1.Job{} batchJob.Name = r.jobName() batchJob.Namespace = restore.Namespace @@ -81,8 +78,8 @@ func (r *RestoreExecutor) createRestoreObject( restore.Spec.AppendEnvFromToContainer(&batchJob.Spec.Template.Spec.Containers[0]) volumes, volumeMounts := r.volumeConfig(restore) - batchJob.Spec.Template.Spec.Volumes = append(volumes, r.attachMoreVolumes()...) - batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append(volumeMounts, r.attachMoreVolumeMounts()...) + batchJob.Spec.Template.Spec.Volumes = append(volumes, utils.AttachTLSVolumes(r.restore.Spec.Volumes)...) + batchJob.Spec.Template.Spec.Containers[0].VolumeMounts = append(volumeMounts, r.attachTLSVolumeMounts()...) 
args, argsErr := r.setupArgs(restore) batchJob.Spec.Template.Spec.Containers[0].Args = args @@ -119,15 +116,17 @@ func (r *RestoreExecutor) setupArgs(restore *k8upv1.Restore) ([]string, error) { return nil, fmt.Errorf("undefined restore method (-restoreType) on '%v/%v'", restore.Namespace, restore.Name) } - args = append(args, r.appendTLSOptionsArgs()...) + if r.restore.Spec.Backend != nil { + args = append(args, utils.AppendTLSOptionsArgs(r.restore.Spec.Backend.TLSOptions)...) + } + if r.restore.Spec.RestoreMethod != nil { + args = append(args, utils.AppendTLSOptionsArgs(r.restore.Spec.RestoreMethod.TLSOptions, certPrefixName)...) + } return args, nil } -func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ( - []corev1.Volume, - []corev1.VolumeMount, -) { +func (r *RestoreExecutor) volumeConfig(restore *k8upv1.Restore) ([]corev1.Volume, []corev1.VolumeMount) { volumes := make([]corev1.Volume, 0) if restore.Spec.RestoreMethod.S3 == nil { addVolume := corev1.Volume{ @@ -183,110 +182,14 @@ func (r *RestoreExecutor) setupEnvVars(ctx context.Context, restore *k8upv1.Rest return vars.Convert() } -func (r *RestoreExecutor) appendTLSOptionsArgs() []string { - var args []string - - if r.restore.Spec.Backend != nil && r.restore.Spec.Backend.TLSOptions != nil { - if r.restore.Spec.Backend.TLSOptions.CACert != "" { - args = append(args, []string{"--caCert", r.restore.Spec.Backend.TLSOptions.CACert}...) - } - if r.restore.Spec.Backend.TLSOptions.ClientCert != "" && r.restore.Spec.Backend.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "--clientCert", - r.restore.Spec.Backend.TLSOptions.ClientCert, - "--clientKey", - r.restore.Spec.Backend.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) 
- } - } - - if r.restore.Spec.RestoreMethod != nil && r.restore.Spec.RestoreMethod.TLSOptions != nil { - if r.restore.Spec.RestoreMethod.TLSOptions.CACert != "" { - args = append(args, []string{"--restoreCaCert", r.restore.Spec.RestoreMethod.TLSOptions.CACert}...) - } - if r.restore.Spec.RestoreMethod.TLSOptions.ClientCert != "" && r.restore.Spec.RestoreMethod.TLSOptions.ClientKey != "" { - addMoreArgs := []string{ - "--restoreClientCert", - r.restore.Spec.RestoreMethod.TLSOptions.ClientCert, - "--restoreClientKey", - r.restore.Spec.RestoreMethod.TLSOptions.ClientKey, - } - args = append(args, addMoreArgs...) - } - } - - return args -} - -func (r *RestoreExecutor) attachMoreVolumes() []corev1.Volume { - ku8pVolume := corev1.Volume{ - Name: _dataDirName, - VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - } - - if utils.ZeroLen(r.restore.Spec.Volumes) { - return []corev1.Volume{ku8pVolume} - } - - moreVolumes := make([]corev1.Volume, 0, len(*r.restore.Spec.Volumes)+1) - moreVolumes = append(moreVolumes, ku8pVolume) - for _, v := range *r.restore.Spec.Volumes { - vol := v - - var volumeSource corev1.VolumeSource - if vol.PersistentVolumeClaim != nil { - volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim - } else if vol.Secret != nil { - volumeSource.Secret = vol.Secret - } else if vol.ConfigMap != nil { - volumeSource.ConfigMap = vol.ConfigMap - } else { - continue - } - - addVolume := corev1.Volume{ - Name: vol.Name, - VolumeSource: volumeSource, - } - moreVolumes = append(moreVolumes, addVolume) - } - - return moreVolumes -} - -func (r *RestoreExecutor) attachMoreVolumeMounts() []corev1.VolumeMount { - var volumeMount []corev1.VolumeMount - +func (r *RestoreExecutor) attachTLSVolumeMounts() []corev1.VolumeMount { + var tlsVolumeMounts []corev1.VolumeMount if r.restore.Spec.Backend != nil && !utils.ZeroLen(r.restore.Spec.Backend.VolumeMounts) { - volumeMount = append(volumeMount, *r.restore.Spec.Backend.VolumeMounts...) 
+ tlsVolumeMounts = append(tlsVolumeMounts, *r.restore.Spec.Backend.VolumeMounts...) } if r.restore.Spec.RestoreMethod != nil && !utils.ZeroLen(r.restore.Spec.RestoreMethod.VolumeMounts) { - for _, v1 := range *r.restore.Spec.RestoreMethod.VolumeMounts { - vm1 := v1 - var isExist bool - - for _, v2 := range volumeMount { - vm2 := v2 - if vm1.Name == vm2.Name && vm1.MountPath == vm2.MountPath { - isExist = true - break - } - } - - if isExist { - continue - } - - volumeMount = append(volumeMount, vm1) - } - } - - addVolumeMount := corev1.VolumeMount{ - Name: _dataDirName, - MountPath: cfg.Config.PodVarDir, + tlsVolumeMounts = append(tlsVolumeMounts, *r.restore.Spec.RestoreMethod.VolumeMounts...) } - volumeMount = append(volumeMount, addVolumeMount) - return volumeMount + return utils.AttachTLSVolumeMounts(cfg.Config.PodVarDir, &tlsVolumeMounts) } diff --git a/operator/utils/utils.go b/operator/utils/utils.go index fba210a6f..37e1f2fa5 100644 --- a/operator/utils/utils.go +++ b/operator/utils/utils.go @@ -4,8 +4,14 @@ import ( "math/rand" "reflect" "time" + + corev1 "k8s.io/api/core/v1" + + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" ) +const _dataDirName = "k8up-dir" + func RandomStringGenerator(n int) string { var characters = []rune("abcdefghijklmnopqrstuvwxyz1234567890") rand.New(rand.NewSource(time.Now().UnixNano())) @@ -21,3 +27,115 @@ func ZeroLen(v interface{}) bool { (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) || (reflect.ValueOf(v).Kind() == reflect.Ptr && !reflect.ValueOf(v).IsNil() && reflect.ValueOf(v).Elem().Len() == 0) } + +func AppendTLSOptionsArgs(opts *k8upv1.TLSOptions, prefixArgName ...string) []string { + var args []string + if opts == nil { + return args + } + + var prefix string + for _, v := range prefixArgName { + prefix = v + } + + caCertArg := "-caCert" + clientCertArg := "-clientCert" + clientKeyArg := "-clientKey" + if prefix != "" { + caCertArg = "-" + prefix + "CaCert" + clientCertArg = "-" + prefix + 
"ClientCert" + clientKeyArg = "-" + prefix + "ClientKey" + } + + if opts.CACert != "" { + args = append(args, []string{caCertArg, opts.CACert}...) + } + if opts.ClientCert != "" && opts.ClientKey != "" { + addMoreArgs := []string{ + clientCertArg, + opts.ClientCert, + clientKeyArg, + opts.ClientKey, + } + args = append(args, addMoreArgs...) + } + + return args +} + +func AttachTLSVolumes(volumes *[]k8upv1.RunnableVolumeSpec) []corev1.Volume { + ku8pVolume := corev1.Volume{ + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + } + + if volumes == nil { + return []corev1.Volume{ku8pVolume} + } + + moreVolumes := make([]corev1.Volume, 0, len(*volumes)+1) + moreVolumes = append(moreVolumes, ku8pVolume) + for _, v := range *volumes { + vol := v + + var volumeSource corev1.VolumeSource + if vol.PersistentVolumeClaim != nil { + volumeSource.PersistentVolumeClaim = vol.PersistentVolumeClaim + } else if vol.Secret != nil { + volumeSource.Secret = vol.Secret + } else if vol.ConfigMap != nil { + volumeSource.ConfigMap = vol.ConfigMap + } else { + continue + } + + addVolume := corev1.Volume{ + Name: vol.Name, + VolumeSource: volumeSource, + } + moreVolumes = append(moreVolumes, addVolume) + } + + return moreVolumes +} + +func AttachTLSVolumeMounts(k8upPodVarDir string, volumeMounts ...*[]corev1.VolumeMount) []corev1.VolumeMount { + k8upVolumeMount := corev1.VolumeMount{ + Name: _dataDirName, + MountPath: k8upPodVarDir, + } + + if len(volumeMounts) == 0 { + return []corev1.VolumeMount{k8upVolumeMount} + } + + var moreVolumeMounts []corev1.VolumeMount + moreVolumeMounts = append(moreVolumeMounts, k8upVolumeMount) + for _, vm := range volumeMounts { + if vm == nil { + continue + } + + for _, v1 := range *vm { + vm1 := v1 + var isExist bool + + for _, v2 := range moreVolumeMounts { + vm2 := v2 + if vm1.Name == vm2.Name && vm1.MountPath == vm2.MountPath { + isExist = true + break + } + } + + if isExist { + continue + } + + 
moreVolumeMounts = append(moreVolumeMounts, vm1) + } + } + + return moreVolumeMounts +} From dc9f803e27a75c9878843d68ec028923ece92ef9 Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 00:52:56 +0330 Subject: [PATCH 32/38] [ADD] Add cmctl command for check cert-manager is ready Signed-off-by: poyaz --- e2e/Makefile | 4 ++++ e2e/lib/k8up.bash | 29 ++++++++++++++++++++++++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/e2e/Makefile b/e2e/Makefile index a1b3f97fb..dad967118 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -26,6 +26,7 @@ kind-load-image: kind-setup docker-build ## Load the e2e container image onto e2 .PHONY: e2e-setup e2e-setup: export KUBECONFIG = $(KIND_KUBECONFIG) e2e-setup: chart-prepare +e2e-setup: e2e-cmctl e2e-setup: e2e/node_modules kind-setup | $(e2etest_dir) ## Run the e2e setup .PHONY: clean @@ -42,3 +43,6 @@ $(e2etest_dir): e2e/node_modules: @npm --prefix ./e2e install + +e2e-cmctl: + @command -v cmctl > /dev/null || $(GO_EXEC) install github.com/cert-manager/cmctl/v2@latest diff --git a/e2e/lib/k8up.bash b/e2e/lib/k8up.bash index 0f4f574c9..3dbcb4674 100755 --- a/e2e/lib/k8up.bash +++ b/e2e/lib/k8up.bash @@ -221,7 +221,7 @@ give_self_signed_issuer() { ns=${NAMESPACE=${DETIK_CLIENT_NAMESPACE}} kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.4/cert-manager.yaml - + cmctl check api --wait=120s kubectl wait -n cert-manager --for=condition=Available deployment/cert-manager-webhook --timeout=120s yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/issure.yaml | kubectl apply -f - yq $(yq --help | grep -q eval && echo e) '.metadata.namespace='\"${MINIO_NAMESPACE}\"'' definitions/cert/minio-ca.yaml | kubectl apply -f - @@ -301,6 +301,33 @@ given_an_existing_backup() { echo "✅ An existing backup is ready" } +given_an_existing_mtls_backup() { + require_args 2 ${#} + + local backup_file_name backup_file_content + 
backup_file_name=${1} + backup_file_content=${2} + given_a_subject "${backup_file_name}" "${backup_file_content}" + + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-mtls.yaml | kubectl apply -f - + + wait_until backup/k8up-backup-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Backup named 'k8up-backup-mtls'" + + for i in {1..3}; do + run restic dump latest "/data/subject-pvc/${backup_file_name}" + if [ ! -z "${output}" ]; then + break + fi + done + + # shellcheck disable=SC2154 + [ "${backup_file_content}" = "${output}" ] + + echo "✅ An existing backup is ready" +} + verify_object_value_by_label() { require_args 5 ${#} From 7d121ffcbcc880cf10d716ea56d60897e2f6a02b Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 00:53:42 +0330 Subject: [PATCH 33/38] [ADD] Add two e2e test for restore and archive Signed-off-by: poyaz --- e2e/test-10-restore-self-signed-tls.bats | 35 +++++++++++++++++ e2e/test-11-archive-self-signed-tls.bats | 50 ++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 e2e/test-10-restore-self-signed-tls.bats create mode 100644 e2e/test-11-archive-self-signed-tls.bats diff --git a/e2e/test-10-restore-self-signed-tls.bats b/e2e/test-10-restore-self-signed-tls.bats new file mode 100644 index 000000000..87c864115 --- /dev/null +++ b/e2e/test-10-restore-self-signed-tls.bats @@ -0,0 +1,35 @@ +#!/usr/bin/env bats + +load "lib/utils" +load "lib/detik" +load "lib/k8up" + +# shellcheck disable=SC2034 +DETIK_CLIENT_NAME="kubectl" +# shellcheck disable=SC2034 +DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" +# shellcheck disable=SC2034 +DEBUG_DETIK="true" + +@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + 
expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_mtls_backup "${expected_filename}" "${expected_content}" + + # Restore + kubectl apply -f definitions/secrets + yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls' and verify that '.status.active' is '1'" + + wait_until restore/k8up-s3-mtls-restore-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-mtls'" + + expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" +} diff --git a/e2e/test-11-archive-self-signed-tls.bats b/e2e/test-11-archive-self-signed-tls.bats new file mode 100644 index 000000000..ab8e2f3fd --- /dev/null +++ b/e2e/test-11-archive-self-signed-tls.bats @@ -0,0 +1,50 @@ +#!/usr/bin/env bats + +load "lib/utils" +load "lib/detik" +load "lib/k8up" + +# shellcheck disable=SC2034 +DETIK_CLIENT_NAME="kubectl" +# shellcheck disable=SC2034 +DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" +# shellcheck disable=SC2034 +DEBUG_DETIK="true" + +@test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { + # Backup + expected_content="Old content for mtls: $(timestamp)" + expected_filename="old_file.txt" + given_a_running_operator + given_a_clean_ns + given_s3_storage + give_self_signed_issuer + given_an_existing_mtls_backup "${expected_filename}" "${expected_content}" + given_a_clean_archive archive + + # Archive + kubectl apply -f definitions/secrets + yq e 
'.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls.yaml | kubectl apply -f - + + try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls' and verify that '.status.started' is 'true'" + try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls' and verify that '.status.active' is '1'" + + wait_until archive/k8up-s3-mtls-archive-mtls completed + verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls'" + + run restic list snapshots + + echo "---BEGIN total restic snapshots output---" + total_snapshots=$(echo -e "${output}" | wc -l) + echo "${total_snapshots}" + echo "---END---" + + run mc ls minio/archive + + echo "---BEGIN total archives output---" + total_archives=$(echo -n -e "${output}" | wc -l) + echo "${total_archives}" + echo "---END---" + + [ "$total_snapshots" -eq "$total_archives" ] +} From 22de53ecd5e5bcd2b01df2edbcf7ece8896f69d7 Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 00:56:30 +0330 Subject: [PATCH 34/38] [DELETE] Delete e2e test self signed tls becuase it has too many test case and spend too much time Move restore and archive test case to two separated files Signed-off-by: poyaz --- e2e/test-10-self-signed-tls.bats | 580 ------------------------------- 1 file changed, 580 deletions(-) delete mode 100644 e2e/test-10-self-signed-tls.bats diff --git a/e2e/test-10-self-signed-tls.bats b/e2e/test-10-self-signed-tls.bats deleted file mode 100644 index 36db94037..000000000 --- a/e2e/test-10-self-signed-tls.bats +++ /dev/null @@ -1,580 +0,0 @@ -#!/usr/bin/env bats - -load "lib/utils" -load "lib/detik" -load "lib/k8up" - -# shellcheck disable=SC2034 -DETIK_CLIENT_NAME="kubectl" -# shellcheck disable=SC2034 -DETIK_CLIENT_NAMESPACE="k8up-e2e-subject" -# shellcheck disable=SC2034 -DEBUG_DETIK="true" - -### Start backup section - -@test "Given a PVC, When 
creating a Backup (TLS) of an app, Then expect Restic repository - using self-signed issuer" { - expected_content="expected content for tls: $(timestamp)" - expected_filename="expected_filename.txt" - - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_a_subject "${expected_filename}" "${expected_content}" - - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-tls.yaml | kubectl apply -f - - - try "at most 10 times every 5s to get backup named 'k8up-backup-tls' and verify that '.status.started' is 'true'" - verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-tls' '.status.active' 1 true - - wait_until backup/k8up-backup-tls completed - - run restic snapshots - - echo "---BEGIN restic snapshots output---" - echo "${output}" - echo "---END---" - - echo -n "Number of Snapshots >= 1? " - jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created - - run get_latest_snap - - run restic dump "${output}" "/data/subject-pvc/${expected_filename}" - - echo "---BEGIN actual ${expected_filename}---" - echo "${output}" - echo "---END---" - - [ "${output}" = "${expected_content}" ] -} - -@test "Given a PVC, When creating a Backup (mTLS) of an app, Then expect Restic repository - using self-signed issuer" { - expected_content="expected content for mtls: $(timestamp)" - expected_filename="expected_filename.txt" - - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_a_subject "${expected_filename}" "${expected_content}" - - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 5s to get backup named 'k8up-backup-mtls' and verify that '.status.started' is 'true'" - 
verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-mtls' '.status.active' 1 true - - wait_until backup/k8up-backup-mtls completed - - run restic snapshots - - echo "---BEGIN restic snapshots output---" - echo "${output}" - echo "---END---" - - echo -n "Number of Snapshots >= 1? " - jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created - - run get_latest_snap - - run restic dump "${output}" "/data/subject-pvc/${expected_filename}" - - echo "---BEGIN actual ${expected_filename}---" - echo "${output}" - echo "---END---" - - [ "${output}" = "${expected_content}" ] -} - -@test "Given a PVC, When creating a Backup (mTLS with env) of an app, Then expect Restic repository - using self-signed issuer" { - expected_content="expected content for mtls: $(timestamp)" - expected_filename="expected_filename.txt" - - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_a_subject "${expected_filename}" "${expected_content}" - - kubectl apply -f definitions/secrets - kubectl apply -f definitions/backup/config-mtls-env.yaml - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/backup/backup-mtls-env.yaml | kubectl apply -f - - - try "at most 10 times every 5s to get backup named 'k8up-backup-mtls-env' and verify that '.status.started' is 'true'" - verify_object_value_by_label job 'k8up.io/owned-by=backup_k8up-backup-mtls-env' '.status.active' 1 true - - wait_until backup/k8up-backup-mtls-env completed - - run restic snapshots - - echo "---BEGIN restic snapshots output---" - echo "${output}" - echo "---END---" - - echo -n "Number of Snapshots >= 1? 
" - jq -e 'length >= 1' <<< "${output}" # Ensure that there was actually a backup created - - run get_latest_snap - - run restic dump "${output}" "/data/subject-pvc/${expected_filename}" - - echo "---BEGIN actual ${expected_filename}---" - echo "${output}" - echo "---END---" - - [ "${output}" = "${expected_content}" ] -} - -### End backup section - -### Start restore to pvc section - -@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to PVC - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Delete and create new subject - new_content="New content for tls: $(timestamp)" - new_filename="new_file.txt" - given_a_clean_ns - give_self_signed_issuer - given_a_subject "${new_filename}" "${new_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/restore-tls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-restore-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-restore-tls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-restore-tls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-restore-tls'" - - expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" - expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${new_filename}" "${new_content}" -} - -@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to PVC - using self-signed issuer" { - # Backup - 
expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Delete and create new subject - new_content="New content for mtls: $(timestamp)" - new_filename="new_file.txt" - given_a_clean_ns - give_self_signed_issuer - given_a_subject "${new_filename}" "${new_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/restore-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-restore-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-restore-mtls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-restore-mtls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-restore-mtls'" - - expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" - expect_file_in_container 'deploy/subject-deployment' 'subject-container' "/data/${new_filename}" "${new_content}" -} - -### End restore to pvc section - -### Start restore to s3 section - -@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to S3 (TLS) - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-tls-restore-tls.yaml | 
kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-s3-tls-restore-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-tls-restore-tls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-s3-tls-restore-tls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-tls-restore-tls'" - - expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" -} - -@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (TLS) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-tls-restore-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-s3-tls-restore-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-tls-restore-mtls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-s3-tls-restore-mtls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-tls-restore-mtls'" - - expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" -} - -@test "Given an existing Restic repository, When creating a Restore (TLS), Then Restore to S3 (mTLS) - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - 
expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-tls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-tls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-s3-mtls-restore-tls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-tls'" - - expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" -} - -@test "Given an existing Restic repository, When creating a Restore (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Restore - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls' and verify that '.status.active' is '1'" - - wait_until restore/k8up-s3-mtls-restore-mtls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for 
Restore named 'k8up-s3-mtls-restore-mtls'" - - expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" -} - -@test "Given an existing Restic repository, When creating a Restore (mTLS with env), Then Restore to S3 (mTLS with env) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Restore - kubectl apply -f definitions/secrets - kubectl apply -f definitions/restore/config-mtls-env.yaml - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/restore/s3-mtls-restore-mtls-env.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Restore named 'k8up-s3-mtls-restore-mtls-env' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-restore-mtls-env' and verify that '.status.active' is '1'" - - wait_until restore/k8up-s3-mtls-restore-mtls-env completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Restore named 'k8up-s3-mtls-restore-mtls-env'" - - expect_dl_file_in_container 'deploy/subject-dl-deployment' 'subject-container' "/data/${expected_filename}" "${expected_content}" -} - -### End restore to s3 section - -### Start archive to s3 section - -@test "Given an existing Restic repository, When creating a Archive (TLS), Then Restore to S3 (TLS) - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - given_a_clean_archive archive - - # Archive - kubectl apply -f 
definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-tls-archive-tls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Archive named 'k8up-s3-tls-archive-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-tls-archive-tls' and verify that '.status.active' is '1'" - - wait_until archive/k8up-s3-tls-archive-tls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-tls-archive-tls'" - - run restic list snapshots - - echo "---BEGIN total restic snapshots output---" - total_snapshots=$(echo -e "${output}" | wc -l) - echo "${total_snapshots}" - echo "---END---" - - run mc ls minio/archive - - echo "---BEGIN total archives output---" - total_archives=$(echo -n -e "${output}" | wc -l) - echo "${total_archives}" - echo "---END---" - - [ "$total_snapshots" -eq "$total_archives" ] -} - -@test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (TLS) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - given_a_clean_archive archive - - # Archive - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-tls-archive-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Archive named 'k8up-s3-tls-archive-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-tls-archive-mtls' and verify that '.status.active' is '1'" - - wait_until archive/k8up-s3-tls-archive-mtls completed - verify 
"'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-tls-archive-mtls'" - - run restic list snapshots - - echo "---BEGIN total restic snapshots output---" - total_snapshots=$(echo -e "${output}" | wc -l) - echo "${total_snapshots}" - echo "---END---" - - run mc ls minio/archive - - echo "---BEGIN total archives output---" - total_archives=$(echo -n -e "${output}" | wc -l) - echo "${total_archives}" - echo "---END---" - - [ "$total_snapshots" -eq "$total_archives" ] -} - -@test "Given an existing Restic repository, When creating a Archive (TLS), Then Restore to S3 (mTLS) - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - given_a_clean_archive archive - - # Archive - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-tls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-tls' and verify that '.status.active' is '1'" - - wait_until archive/k8up-s3-mtls-archive-tls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-tls'" - - run restic list snapshots - - echo "---BEGIN total restic snapshots output---" - total_snapshots=$(echo -e "${output}" | wc -l) - echo "${total_snapshots}" - echo "---END---" - - run mc ls minio/archive - - echo "---BEGIN total archives output---" - total_archives=$(echo -n -e "${output}" | wc -l) - echo "${total_archives}" - echo "---END---" - - [ "$total_snapshots" -eq "$total_archives" ] -} - 
-@test "Given an existing Restic repository, When creating a Archive (mTLS), Then Restore to S3 (mTLS) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - given_a_clean_archive archive - - # Archive - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls' and verify that '.status.active' is '1'" - - wait_until archive/k8up-s3-mtls-archive-mtls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls'" - - run restic list snapshots - - echo "---BEGIN total restic snapshots output---" - total_snapshots=$(echo -e "${output}" | wc -l) - echo "${total_snapshots}" - echo "---END---" - - run mc ls minio/archive - - echo "---BEGIN total archives output---" - total_archives=$(echo -n -e "${output}" | wc -l) - echo "${total_archives}" - echo "---END---" - - [ "$total_snapshots" -eq "$total_archives" ] -} - -@test "Given an existing Restic repository, When creating a Archive (mTLS with env), Then Restore to S3 (mTLS with env) - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - given_a_clean_archive archive - - # Archive - kubectl apply -f definitions/secrets - kubectl 
apply -f definitions/archive/config-mtls-env.yaml - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/archive/s3-mtls-archive-mtls-env.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Archive named 'k8up-s3-mtls-archive-mtls-env' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-s3-mtls-archive-mtls-env' and verify that '.status.active' is '1'" - - wait_until archive/k8up-s3-mtls-archive-mtls-env completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Archive named 'k8up-s3-mtls-archive-mtls-env'" - - run restic list snapshots - - echo "---BEGIN total restic snapshots output---" - total_snapshots=$(echo -e "${output}" | wc -l) - echo "${total_snapshots}" - echo "---END---" - - run mc ls minio/archive - - echo "---BEGIN total archives output---" - total_archives=$(echo -n -e "${output}" | wc -l) - echo "${total_archives}" - echo "---END---" - - [ "$total_snapshots" -eq "$total_archives" ] -} - -### End archive to s3 section - -### Start check section - -@test "Given a PVC, When creating a Check (TLS) of an app, Then expect Restic repository - using self-signed issuer" { - # Backup - expected_content="Old content for tls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Check - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-tls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Check named 'k8up-check-tls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-check-tls' and verify that '.status.active' is '1'" - - wait_until check/k8up-check-tls completed - 
verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-tls'" -} - -@test "Given a PVC, When creating a Check (mTLS) of an app, Then expect Restic repository - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Check - kubectl apply -f definitions/secrets - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-mtls.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Check named 'k8up-check-mtls' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-check-mtls' and verify that '.status.active' is '1'" - - wait_until check/k8up-check-mtls completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-mtls'" -} - -@test "Given a PVC, When creating a Check (mTLS with env) of an app, Then expect Restic repository - using self-signed issuer" { - # Backup - expected_content="Old content for mtls: $(timestamp)" - expected_filename="old_file.txt" - given_a_running_operator - given_a_clean_ns - given_s3_storage - give_self_signed_issuer - given_an_existing_backup "${expected_filename}" "${expected_content}" - - # Check - kubectl apply -f definitions/secrets - kubectl apply -f definitions/check/config-mtls-env.yaml - yq e '.spec.podSecurityContext.fsGroup='$(id -u)' | .spec.podSecurityContext.runAsUser='$(id -u)'' definitions/check/check-mtls-env.yaml | kubectl apply -f - - - try "at most 10 times every 1s to get Check named 'k8up-check-mtls-env' and verify that '.status.started' is 'true'" - try "at most 10 times every 1s to get Job named 'k8up-check-mtls-env' and verify that '.status.active' is '1'" 
- - wait_until check/k8up-check-mtls-env completed - verify "'.status.conditions[?(@.type==\"Completed\")].reason' is 'Succeeded' for Check named 'k8up-check-mtls-env'" -} - -### End check section From df889cb4622fe51c5bfb739b5fc2610b8d68425b Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 17:14:02 +0330 Subject: [PATCH 35/38] [UPDATE] Add unit test for utils file and refactoring ZeroLen function Signed-off-by: poyaz --- operator/utils/utils.go | 23 +- operator/utils/utils_test.go | 583 +++++++++++++++++++++++++++++++++++ 2 files changed, 603 insertions(+), 3 deletions(-) create mode 100644 operator/utils/utils_test.go diff --git a/operator/utils/utils.go b/operator/utils/utils.go index 37e1f2fa5..006cf975b 100644 --- a/operator/utils/utils.go +++ b/operator/utils/utils.go @@ -23,9 +23,26 @@ func RandomStringGenerator(n int) string { } func ZeroLen(v interface{}) bool { - return v == nil || - (reflect.ValueOf(v).Kind() == reflect.Ptr && reflect.ValueOf(v).IsNil()) || - (reflect.ValueOf(v).Kind() == reflect.Ptr && !reflect.ValueOf(v).IsNil() && reflect.ValueOf(v).Elem().Len() == 0) + if v == nil { + return true + } + + vv := reflect.ValueOf(v) + if vv.Kind() == reflect.Ptr { + if vv.IsNil() { + return true + } + vv = vv.Elem() + } + if !(vv.IsValid() && !vv.IsZero()) { + return true + } + switch vv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return vv.Len() == 0 + } + + return true } func AppendTLSOptionsArgs(opts *k8upv1.TLSOptions, prefixArgName ...string) []string { diff --git a/operator/utils/utils_test.go b/operator/utils/utils_test.go new file mode 100644 index 000000000..190d0ecb9 --- /dev/null +++ b/operator/utils/utils_test.go @@ -0,0 +1,583 @@ +package utils + +import ( + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" + corev1 "k8s.io/api/core/v1" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/utils/ptr" +) + +func Test_RandomStringGenerator(t *testing.T) { + type args struct { + n int 
+ } + + tests := []struct { + name string + args args + want int + }{ + { + name: "return random string with length zero", + args: args{n: 0}, + }, + { + name: "return random string with length one", + args: args{n: 1}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Lenf(t, RandomStringGenerator(tt.args.n), tt.args.n, "RandomStringGenerator(%v)", tt.args.n) + }) + } +} + +func Test_ZeroLen(t *testing.T) { + type args struct { + v interface{} + } + + type sd struct { + StrPtrNil *string + StrPtrEmpty *string + StrPtrFill *string + StrEmpty string + StrFill string + SlicePtrNil *[]string + SlicePtrEmpty *[]string + SlicePtrFill *[]string + SliceEmpty []string + SliceFill []string + MapPtrNil *map[string]string + MapPtrEmpty *map[string]string + MapPtrFill *map[string]string + MapEmpty map[string]string + MapFill map[string]string + ArrayPtrNil *[1]string + ArrayPtrEmpty *[1]string + ArrayPtrFill *[1]string + ArrayEmpty [1]string + ArrayFill [1]string + IntPtrNil *int + IntPtrEmpty *int + IntPtrFill *int + } + s := sd{ + StrPtrEmpty: ptr.To(""), + StrPtrFill: ptr.To("this-is-test"), + StrFill: "this-is-test", + SlicePtrEmpty: ptr.To([]string{}), + SlicePtrFill: ptr.To([]string{""}), + SliceFill: []string{""}, + MapPtrEmpty: ptr.To(map[string]string{}), + MapPtrFill: ptr.To(map[string]string{"": ""}), + MapFill: map[string]string{"": ""}, + ArrayPtrEmpty: ptr.To([1]string{}), + ArrayPtrFill: ptr.To([1]string{"this-is-test"}), + ArrayFill: [1]string{"this-is-test"}, + IntPtrEmpty: ptr.To(0), + IntPtrFill: ptr.To(12), + } + + tests := []struct { + name string + args args + want bool + }{ + { + name: "return true when value is nil", + args: args{v: nil}, + want: true, + }, + { + name: "return true when value is nil string pointer", + args: args{v: s.StrPtrNil}, + want: true, + }, + { + name: "return true when value is empty string pointer", + args: args{v: s.StrPtrEmpty}, + want: true, + }, + { + name: "return false when value is not 
empty string pointer", + args: args{v: s.StrPtrFill}, + want: false, + }, + { + name: "return true when value is empty string", + args: args{v: s.StrEmpty}, + want: true, + }, + { + name: "return false when value is not empty string", + args: args{v: s.StrFill}, + want: false, + }, + { + name: "return true when value is nil slice pointer", + args: args{v: s.SlicePtrNil}, + want: true, + }, + { + name: "return true when value is empty slice pointer", + args: args{v: s.SlicePtrEmpty}, + want: true, + }, + { + name: "return false when value is not empty slice pointer", + args: args{v: s.SlicePtrFill}, + want: false, + }, + { + name: "return true when value is empty slice", + args: args{v: s.SliceEmpty}, + want: true, + }, + { + name: "return false when value is not empty slice", + args: args{v: s.SliceFill}, + want: false, + }, + { + name: "return true when value is nil map pointer", + args: args{v: s.MapPtrNil}, + want: true, + }, + { + name: "return true when value is empty map pointer", + args: args{v: s.MapPtrEmpty}, + want: true, + }, + { + name: "return false when value is not empty map pointer", + args: args{v: s.MapPtrFill}, + want: false, + }, + { + name: "return true when value is empty map", + args: args{v: s.MapEmpty}, + want: true, + }, + { + name: "return false when value is not empty map", + args: args{v: s.MapFill}, + want: false, + }, + { + name: "return true when value is nil array pointer", + args: args{v: s.ArrayPtrNil}, + want: true, + }, + { + name: "return true when value is empty array pointer", + args: args{v: s.ArrayPtrEmpty}, + want: true, + }, + { + name: "return false when value is not empty array pointer", + args: args{v: s.ArrayPtrFill}, + want: false, + }, + { + name: "return true when value is empty array", + args: args{v: s.ArrayEmpty}, + want: true, + }, + { + name: "return false when value is not empty array", + args: args{v: s.ArrayFill}, + want: false, + }, + { + name: "return true when value is nil int pointer", + args: args{v: 
s.IntPtrNil}, + want: true, + }, + { + name: "return true when value is empty int pointer", + args: args{v: s.IntPtrEmpty}, + want: true, + }, + { + name: "return false when value is not empty int pointer", + args: args{v: s.IntPtrFill}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, ZeroLen(tt.args.v), "ZeroLen(%v)", tt.args.v) + }) + } +} + +func Test_AppendTLSOptionsArgs(t *testing.T) { + type args struct { + opts *k8upv1.TLSOptions + prefixArgName []string + } + + tests := []struct { + name string + args args + want []string + }{ + { + name: "return empty args when tlsOptions is nil", + args: args{}, + want: []string(nil), + }, + { + name: "return empty args when tlsOptions is nil (with prefix)", + args: args{prefixArgName: []string{"restore"}}, + want: []string(nil), + }, + { + name: "return args with caCert when tlsOptions has property caCert", + args: args{opts: &k8upv1.TLSOptions{CACert: "/path/of/ca.cert"}}, + want: []string{"-caCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions has property caCert (with prefix)", + args: args{ + opts: &k8upv1.TLSOptions{CACert: "/path/of/ca.cert"}, + prefixArgName: []string{"restore"}, + }, + want: []string{"-restoreCaCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions has property caCert and pick last index of prefix", + args: args{ + opts: &k8upv1.TLSOptions{CACert: "/path/of/ca.cert"}, + prefixArgName: []string{"restore0", "restore1", "restore2"}, + }, + want: []string{"-restore2CaCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientCert", + args: args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientCert: "/path/of/client.crt", + }, + }, + want: []string{"-caCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientCert (with prefix)", + args: 
args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientCert: "/path/of/client.crt", + }, + prefixArgName: []string{"restore"}, + }, + want: []string{"-restoreCaCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientKey", + args: args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientKey: "/path/of/client.key", + }, + }, + want: []string{"-caCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientKey (with prefix)", + args: args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientKey: "/path/of/client.key", + }, + prefixArgName: []string{"restore"}, + }, + want: []string{"-restoreCaCert", "/path/of/ca.cert"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientCert, clientKey", + args: args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientCert: "/path/of/client.crt", + ClientKey: "/path/of/client.key", + }, + }, + want: []string{"-caCert", "/path/of/ca.cert", "-clientCert", "/path/of/client.crt", "-clientKey", "/path/of/client.key"}, + }, + { + name: "return args with caCert when tlsOptions have properties caCert, clientCert, clientKey (with prefix)", + args: args{ + opts: &k8upv1.TLSOptions{ + CACert: "/path/of/ca.cert", + ClientCert: "/path/of/client.crt", + ClientKey: "/path/of/client.key", + }, + prefixArgName: []string{"restore"}, + }, + want: []string{"-restoreCaCert", "/path/of/ca.cert", "-restoreClientCert", "/path/of/client.crt", "-restoreClientKey", "/path/of/client.key"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, AppendTLSOptionsArgs(tt.args.opts, tt.args.prefixArgName...), "AppendTLSOptionsArgs(%v, %v)", tt.args.opts, tt.args.prefixArgName) + }) + } +} + +func Test_AttachTLSVolumes(t *testing.T) { + type args struct { + volumes *[]k8upv1.RunnableVolumeSpec + } + + tests := 
[]struct { + name string + args args + want []corev1.Volume + }{ + { + name: "return volumes contain k8up volume when volumes arg is nil", + args: args{}, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + }, + }, + { + name: "return volumes contain k8up volume when volumes arg is empty", + args: args{ + volumes: &[]k8upv1.RunnableVolumeSpec{}, + }, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + }, + }, + { + name: "return volumes contain k8up volume when volumes arg contains volume with only name", + args: args{ + volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: "volume", + }, + }, + }, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + }, + }, + { + name: "return volumes contain k8up volume and PersistentVolumeClaim", + args: args{ + volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: "pvc", + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "claimName", + ReadOnly: true, + }, + }, + }, + }, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + { + Name: "pvc", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "claimName", + ReadOnly: true, + }, + }, + }, + }, + }, + { + name: "return volumes contain k8up volume and SecretVolumeSource", + args: args{ + volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: "secret", + Secret: &corev1.SecretVolumeSource{ + SecretName: "secretName", + }, + }, + }, + }, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + { + Name: "secret", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + 
SecretName: "secretName", + }, + }, + }, + }, + }, + { + name: "return volumes contain k8up volume and ConfigMapVolumeSource", + args: args{ + volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: "config", + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "configMap", + }, + }, + }, + }, + }, + want: []corev1.Volume{ + { + Name: _dataDirName, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + { + Name: "config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "configMap", + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equalf(t, tt.want, AttachTLSVolumes(tt.args.volumes), "AttachTLSVolumes(%v)", tt.args.volumes) + }) + } +} + +func Test_AttachTLSVolumeMounts(t *testing.T) { + k8upPath := "/k8up" + type args struct { + k8upPodVarDir string + volumeMounts *[]corev1.VolumeMount + addNilVolumeMounts bool + } + + tests := []struct { + name string + args args + want []corev1.VolumeMount + }{ + { + name: "return volume mounts contain k8up mount when volume mounts arg is nil", + args: args{k8upPodVarDir: k8upPath}, + want: []corev1.VolumeMount{ + { + Name: _dataDirName, + MountPath: k8upPath, + }, + }, + }, + { + name: "return volume mounts contain k8up mount when volume mounts arg is empty", + args: args{k8upPodVarDir: k8upPath, volumeMounts: &[]corev1.VolumeMount{}}, + want: []corev1.VolumeMount{ + { + Name: _dataDirName, + MountPath: k8upPath, + }, + }, + }, + { + name: "return volume mounts contain k8up mount when call with more volume mounts", + args: args{ + k8upPodVarDir: k8upPath, + volumeMounts: &[]corev1.VolumeMount{}, + addNilVolumeMounts: true, + }, + want: []corev1.VolumeMount{ + { + Name: _dataDirName, + MountPath: k8upPath, + }, + }, + }, + { + name: "return volume mounts contain k8up mount and a the one volume is 
mounted", + args: args{ + k8upPodVarDir: k8upPath, + volumeMounts: &[]corev1.VolumeMount{ + { + Name: "minio-client-mtls", + MountPath: "/mnt/tls/", + }, + }, + }, + want: []corev1.VolumeMount{ + { + Name: _dataDirName, + MountPath: k8upPath, + }, + { + Name: "minio-client-mtls", + MountPath: "/mnt/tls/", + }, + }, + }, + { + name: "return volume mounts contain k8up mount and a the one volume is mounted (remove duplicate)", + args: args{ + k8upPodVarDir: k8upPath, + volumeMounts: &[]corev1.VolumeMount{ + { + Name: "minio-client-mtls", + MountPath: "/mnt/tls/", + }, + { + Name: "minio-client-mtls", + MountPath: "/mnt/tls/", + }, + }, + }, + want: []corev1.VolumeMount{ + { + Name: _dataDirName, + MountPath: k8upPath, + }, + { + Name: "minio-client-mtls", + MountPath: "/mnt/tls/", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.args.addNilVolumeMounts { + assert.Equalf(t, tt.want, AttachTLSVolumeMounts(tt.args.k8upPodVarDir, tt.args.volumeMounts, nil), "Test_AttachTLSVolumeMounts(%v, %v, %v)", tt.args.k8upPodVarDir, tt.args.volumeMounts, nil) + } else if tt.args.volumeMounts != nil { + assert.Equalf(t, tt.want, AttachTLSVolumeMounts(tt.args.k8upPodVarDir, tt.args.volumeMounts), "Test_AttachTLSVolumeMounts(%v, %v)", tt.args.k8upPodVarDir, tt.args.volumeMounts) + } else { + assert.Equalf(t, tt.want, AttachTLSVolumeMounts(tt.args.k8upPodVarDir), "Test_AttachTLSVolumeMounts(%v)", tt.args.k8upPodVarDir) + } + }) + } +} From 1fc3b16762c5f9e9c855ff2b1c64d61bfd869f71 Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 17:32:05 +0330 Subject: [PATCH 36/38] [UPDATE] Rename argument "--varDir" to "-varDir" Signed-off-by: poyaz --- operator/backupcontroller/backup_utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/operator/backupcontroller/backup_utils.go b/operator/backupcontroller/backup_utils.go index eed9be5cf..98780a563 100644 --- a/operator/backupcontroller/backup_utils.go +++ 
b/operator/backupcontroller/backup_utils.go @@ -75,7 +75,7 @@ func (b *BackupExecutor) createServiceAccountAndBinding(ctx context.Context) err } func (b *BackupExecutor) setupArgs() []string { - args := []string{"--varDir", cfg.Config.PodVarDir} + args := []string{"-varDir", cfg.Config.PodVarDir} if len(b.backup.Spec.Tags) > 0 { args = append(args, executor.BuildTagArgs(b.backup.Spec.Tags)...) } From ad78959708e69bc555c15b8ed6d37b97869c7790 Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 17:32:40 +0330 Subject: [PATCH 37/38] [FIX] Fix execute ps for alpine and BusyBox Signed-off-by: poyaz --- clean.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index 7dac2fc29..a637aef92 100755 --- a/clean.sh +++ b/clean.sh @@ -9,7 +9,12 @@ pidfile_exists() { } pid_alive() { - xargs ps -p >/dev/null < "${1}" + if ps --help 2>&1 | grep -q BusyBox; then + xargs ps p >/dev/null < "${1}" + else + xargs ps -p >/dev/null < "${1}" + fi + return $? } From 80b2dddf80364c914a3535d54de92a0c96aa95db Mon Sep 17 00:00:00 2001 From: poyaz Date: Thu, 11 Apr 2024 17:33:31 +0330 Subject: [PATCH 38/38] [ADD] Add integration test for TLS and Mutual TLS options Signed-off-by: poyaz --- .../controller_integration_test.go | 31 ++++ .../controller_utils_integration_test.go | 133 ++++++++++++++ .../controller_integration_test.go | 24 +++ .../controller_utils_integration_test.go | 172 ++++++++++++++++++ 4 files changed, 360 insertions(+) create mode 100644 operator/checkcontroller/controller_utils_integration_test.go diff --git a/operator/backupcontroller/controller_integration_test.go b/operator/backupcontroller/controller_integration_test.go index bcd6913ba..7bf7ce6f0 100644 --- a/operator/backupcontroller/controller_integration_test.go +++ b/operator/backupcontroller/controller_integration_test.go @@ -138,6 +138,37 @@ func (ts *BackupTestSuite) Test_GivenBackupWithSecurityContext_ExpectBackupJobWi ts.Assert().Equal(int64(500), 
*backupJob.Spec.ActiveDeadlineSeconds) } +func (ts *BackupTestSuite) Test_GivenBackupWithTlsOptions_ExpectBackupJobWithTlsOptions() { + ts.BackupResource = ts.newBackupTls() + pvc := ts.newPvc("test-pvc", corev1.ReadWriteMany) + ts.EnsureResources(ts.BackupResource, pvc) + + pvc.Status.Phase = corev1.ClaimBound + ts.UpdateStatus(pvc) + + result := ts.whenReconciling(ts.BackupResource) + ts.Require().GreaterOrEqual(result.RequeueAfter, 30*time.Second) + + backupJob := ts.expectABackupJob() + ts.Assert().NotNil(backupJob.Spec.Template.Spec.Volumes) + ts.assertBackupTlsVolumeAndTlsOptions(backupJob) +} + +func (ts *BackupTestSuite) Test_GivenBackupWithMutualTlsOptions_ExpectBackupJobWithMutualTlsOptions() { + ts.BackupResource = ts.newBackupMutualTls() + pvc := ts.newPvc("test-pvc", corev1.ReadWriteMany) + ts.EnsureResources(ts.BackupResource, pvc) + + pvc.Status.Phase = corev1.ClaimBound + ts.UpdateStatus(pvc) + + result := ts.whenReconciling(ts.BackupResource) + ts.Require().GreaterOrEqual(result.RequeueAfter, 30*time.Second) + + backupJob := ts.expectABackupJob() + ts.assertBackupMutualTlsVolumeAndMutualTlsOptions(backupJob) +} + func (ts *BackupTestSuite) Test_GivenPreBackupPods_ExpectPreBackupDeployment() { ts.EnsureResources(ts.BackupResource, ts.newPreBackupPod()) diff --git a/operator/backupcontroller/controller_utils_integration_test.go b/operator/backupcontroller/controller_utils_integration_test.go index 9c07f6d84..3983e8ac3 100644 --- a/operator/backupcontroller/controller_utils_integration_test.go +++ b/operator/backupcontroller/controller_utils_integration_test.go @@ -4,6 +4,8 @@ package backupcontroller import ( "context" + "github.com/k8up-io/k8up/v2/operator/cfg" + "k8s.io/utils/ptr" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -23,6 +25,18 @@ import ( const ( backupTag = "integrationTag" + + backupTlsVolumeName = "minio-client-tls" + backupTlsVolumeSecretName = "minio-client-tls" + backupTlsVolumeMount = "/mnt/tls" + 
backupTlsCaCertPath = backupTlsVolumeMount + "/ca.cert" + + backupMutualTlsVolumeName = "minio-client-mtls" + backupMutualTlsVolumeSecretName = "minio-client-mtls" + backupMutualTlsVolumeMount = "/mnt/mtls" + backupMutualTlsCaCertPath = backupMutualTlsVolumeMount + "/ca.cert" + backupMutualTlsClientCertPath = backupMutualTlsVolumeMount + "/client.cert" + backupMutualTlsKeyCertPath = backupMutualTlsVolumeMount + "/client.key" ) func (ts *BackupTestSuite) newPvc(name string, accessMode corev1.PersistentVolumeAccessMode) *corev1.PersistentVolumeClaim { @@ -134,6 +148,125 @@ func (ts *BackupTestSuite) newBackup() *k8upv1.Backup { } } +func (ts *BackupTestSuite) newBackupTls() *k8upv1.Backup { + return &k8upv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + Namespace: ts.NS, + UID: uuid.NewUUID(), + }, + Spec: k8upv1.BackupSpec{ + RunnableSpec: k8upv1.RunnableSpec{ + Backend: &k8upv1.Backend{ + TLSOptions: &k8upv1.TLSOptions{CACert: backupTlsCaCertPath}, + VolumeMounts: &[]corev1.VolumeMount{ + { + Name: backupTlsVolumeName, + MountPath: backupTlsVolumeMount, + }, + }, + }, + Volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: backupTlsVolumeName, + Secret: &corev1.SecretVolumeSource{ + SecretName: backupTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + }, + }, + }, + } +} + +func (ts *BackupTestSuite) assertBackupTlsVolumeAndTlsOptions(job *batchv1.Job) { + expectArgs := []string{"-varDir", cfg.Config.PodVarDir, "-caCert", backupTlsCaCertPath} + expectVolumeMount := corev1.VolumeMount{Name: backupTlsVolumeName, MountPath: backupTlsVolumeMount} + expectVolume := corev1.Volume{ + Name: backupTlsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: backupTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + } + + jobArguments := job.Spec.Template.Spec.Containers[0].Args + ts.Assert().Equal(jobArguments, expectArgs, "backup tls 
contains caCert path in job args") + jobVolumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + ts.Assert().NotNil(jobVolumeMounts) + ts.Assert().Contains(jobVolumeMounts, expectVolumeMount, "backup ca cert in job volume mount") + jobVolumes := job.Spec.Template.Spec.Volumes + ts.Assert().NotNil(jobVolumes) + ts.Assert().Contains(jobVolumes, expectVolume, "backup ca cert in job volume mount") +} + +func (ts *BackupTestSuite) newBackupMutualTls() *k8upv1.Backup { + return &k8upv1.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + Namespace: ts.NS, + UID: uuid.NewUUID(), + }, + Spec: k8upv1.BackupSpec{ + RunnableSpec: k8upv1.RunnableSpec{ + Backend: &k8upv1.Backend{ + TLSOptions: &k8upv1.TLSOptions{ + CACert: backupMutualTlsCaCertPath, + ClientCert: backupMutualTlsClientCertPath, + ClientKey: backupMutualTlsKeyCertPath, + }, + VolumeMounts: &[]corev1.VolumeMount{ + { + Name: backupMutualTlsVolumeName, + MountPath: backupMutualTlsVolumeMount, + }, + }, + }, + Volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: backupMutualTlsVolumeName, + Secret: &corev1.SecretVolumeSource{ + SecretName: backupMutualTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + }, + }, + }, + } +} + +func (ts *BackupTestSuite) assertBackupMutualTlsVolumeAndMutualTlsOptions(job *batchv1.Job) { + expectArgs := []string{ + "-varDir", cfg.Config.PodVarDir, + "-caCert", backupMutualTlsCaCertPath, + "-clientCert", backupMutualTlsClientCertPath, + "-clientKey", backupMutualTlsKeyCertPath, + } + expectVolumeMount := corev1.VolumeMount{Name: backupMutualTlsVolumeName, MountPath: backupMutualTlsVolumeMount} + expectVolume := corev1.Volume{ + Name: backupMutualTlsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: backupMutualTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + } + + jobArguments := job.Spec.Template.Spec.Containers[0].Args + 
ts.Assert().Equal(jobArguments, expectArgs, "backup tls contains caCert path in job args") + jobVolumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + ts.Assert().NotNil(jobVolumeMounts) + ts.Assert().Contains(jobVolumeMounts, expectVolumeMount, "backup ca cert in job volume mount") + jobVolumes := job.Spec.Template.Spec.Volumes + ts.Assert().NotNil(jobVolumes) + ts.Assert().Contains(jobVolumes, expectVolume, "backup ca cert in job volume mount") +} + func (ts *BackupTestSuite) newBackupWithSecurityContext() *k8upv1.Backup { runAsNonRoot := true sc := &corev1.PodSecurityContext{ diff --git a/operator/checkcontroller/controller_integration_test.go b/operator/checkcontroller/controller_integration_test.go index 9b6bebafe..0ba9f09af 100644 --- a/operator/checkcontroller/controller_integration_test.go +++ b/operator/checkcontroller/controller_integration_test.go @@ -26,6 +26,8 @@ type CheckTestSuite struct { GivenChecks []*k8upv1.Check KeepSuccessful int KeepFailed int + + BackupResource *k8upv1.Backup } func Test_Check(t *testing.T) { @@ -136,3 +138,25 @@ func (ts *CheckTestSuite) expectNumberOfJobs(jobAmount int) { ts.Assert().GreaterOrEqual(jobsLen, jobAmount) } + +func (ts *CheckTestSuite) Test_GivenCheckWithTlsOptions_ExpectCheckJobWithTlsOptions() { + checkResource := ts.newCheckTls() + ts.EnsureResources(checkResource) + + result := ts.whenReconciling(checkResource) + ts.Require().GreaterOrEqual(result.RequeueAfter, 30*time.Second) + + checkJob := ts.expectACheckJob() + ts.assertCheckTlsVolumeAndTlsOptions(checkJob) +} + +func (ts *CheckTestSuite) Test_GivenCheckWithMutualTlsOptions_ExpectCheckJobWithMutualTlsOptions() { + checkResource := ts.newCheckMutualTls() + ts.EnsureResources(checkResource) + + result := ts.whenReconciling(checkResource) + ts.Require().GreaterOrEqual(result.RequeueAfter, 30*time.Second) + + checkJob := ts.expectACheckJob() + ts.assertCheckMutualTlsVolumeAndMutualTlsOptions(checkJob) +} diff --git 
a/operator/checkcontroller/controller_utils_integration_test.go b/operator/checkcontroller/controller_utils_integration_test.go new file mode 100644 index 000000000..55c4c3cac --- /dev/null +++ b/operator/checkcontroller/controller_utils_integration_test.go @@ -0,0 +1,172 @@ +//go:build integration + +package checkcontroller + +import ( + "github.com/k8up-io/k8up/v2/operator/cfg" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/utils/ptr" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + k8upv1 "github.com/k8up-io/k8up/v2/api/v1" +) + +const ( + checkTlsVolumeName = "minio-client-tls" + checkTlsVolumeSecretName = "minio-client-tls" + checkTlsVolumeMount = "/mnt/tls" + checkTlsCaCertPath = checkTlsVolumeMount + "/ca.cert" + + checkMutualTlsVolumeName = "minio-client-mtls" + checkMutualTlsVolumeSecretName = "minio-client-mtls" + checkMutualTlsVolumeMount = "/mnt/mtls" + checkMutualTlsCaCertPath = checkMutualTlsVolumeMount + "/ca.cert" + checkMutualTlsClientCertPath = checkMutualTlsVolumeMount + "/client.cert" + checkMutualTlsKeyCertPath = checkMutualTlsVolumeMount + "/client.key" +) + +func (ts *CheckTestSuite) expectACheckJob() (foundJob *batchv1.Job) { + jobs := new(batchv1.JobList) + err := ts.Client.List(ts.Ctx, jobs, client.InNamespace(ts.NS)) + ts.Require().NoError(err) + + jobsLen := len(jobs.Items) + ts.T().Logf("%d Jobs found", jobsLen) + ts.Require().Len(jobs.Items, 1, "job exists") + return &jobs.Items[0] +} + +func (ts *CheckTestSuite) whenReconciling(object *k8upv1.Check) controllerruntime.Result { + controller := CheckReconciler{ + Kube: ts.Client, + } + + result, err := controller.Provision(ts.Ctx, object) + ts.Require().NoError(err) + + return result +} + +func (ts *CheckTestSuite) newCheckTls() *k8upv1.Check { + return &k8upv1.Check{ + ObjectMeta: metav1.ObjectMeta{ + Name: "check", + 
Namespace: ts.NS, + UID: uuid.NewUUID(), + }, + Spec: k8upv1.CheckSpec{ + RunnableSpec: k8upv1.RunnableSpec{ + Backend: &k8upv1.Backend{ + TLSOptions: &k8upv1.TLSOptions{CACert: checkTlsCaCertPath}, + VolumeMounts: &[]corev1.VolumeMount{ + { + Name: checkTlsVolumeName, + MountPath: checkTlsVolumeMount, + }, + }, + }, + Volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: checkTlsVolumeName, + Secret: &corev1.SecretVolumeSource{ + SecretName: checkTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + }, + }, + }, + } +} + +func (ts *CheckTestSuite) assertCheckTlsVolumeAndTlsOptions(job *batchv1.Job) { + expectArgs := []string{"-varDir", cfg.Config.PodVarDir, "-check", "-caCert", checkTlsCaCertPath} + expectVolumeMount := corev1.VolumeMount{Name: checkTlsVolumeName, MountPath: checkTlsVolumeMount} + expectVolume := corev1.Volume{ + Name: checkTlsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: checkTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + } + + jobArguments := job.Spec.Template.Spec.Containers[0].Args + ts.Assert().Equal(jobArguments, expectArgs, "check tls contains caCert path in job args") + jobVolumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + ts.Assert().NotNil(jobVolumeMounts) + ts.Assert().Contains(jobVolumeMounts, expectVolumeMount, "check ca cert in job volume mount") + jobVolumes := job.Spec.Template.Spec.Volumes + ts.Assert().NotNil(jobVolumes) + ts.Assert().Contains(jobVolumes, expectVolume, "check ca cert in job volume mount") +} + +func (ts *CheckTestSuite) newCheckMutualTls() *k8upv1.Check { + return &k8upv1.Check{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backup", + Namespace: ts.NS, + UID: uuid.NewUUID(), + }, + Spec: k8upv1.CheckSpec{ + RunnableSpec: k8upv1.RunnableSpec{ + Backend: &k8upv1.Backend{ + TLSOptions: &k8upv1.TLSOptions{ + CACert: checkMutualTlsCaCertPath, + ClientCert: 
checkMutualTlsClientCertPath, + ClientKey: checkMutualTlsKeyCertPath, + }, + VolumeMounts: &[]corev1.VolumeMount{ + { + Name: checkMutualTlsVolumeName, + MountPath: checkMutualTlsVolumeMount, + }, + }, + }, + Volumes: &[]k8upv1.RunnableVolumeSpec{ + { + Name: checkMutualTlsVolumeName, + Secret: &corev1.SecretVolumeSource{ + SecretName: checkMutualTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + }, + }, + }, + } +} + +func (ts *CheckTestSuite) assertCheckMutualTlsVolumeAndMutualTlsOptions(job *batchv1.Job) { + expectArgs := []string{ + "-varDir", cfg.Config.PodVarDir, + "-check", + "-caCert", checkMutualTlsCaCertPath, + "-clientCert", checkMutualTlsClientCertPath, + "-clientKey", checkMutualTlsKeyCertPath, + } + expectVolumeMount := corev1.VolumeMount{Name: checkMutualTlsVolumeName, MountPath: checkMutualTlsVolumeMount} + expectVolume := corev1.Volume{ + Name: checkMutualTlsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: checkMutualTlsVolumeSecretName, + DefaultMode: ptr.To(corev1.SecretVolumeSourceDefaultMode), + }, + }, + } + + jobArguments := job.Spec.Template.Spec.Containers[0].Args + ts.Assert().Equal(jobArguments, expectArgs, "check tls contains caCert path in job args") + jobVolumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + ts.Assert().NotNil(jobVolumeMounts) + ts.Assert().Contains(jobVolumeMounts, expectVolumeMount, "check ca cert in job volume mount") + jobVolumes := job.Spec.Template.Spec.Volumes + ts.Assert().NotNil(jobVolumes) + ts.Assert().Contains(jobVolumes, expectVolume, "check ca cert in job volume mount") +}