From ddf6663c9c06fdb834c42aa17a36a470870d4815 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Mon, 18 Mar 2024 16:02:41 +0530
Subject: [PATCH 1/4] Add options to specify dataPoolSpec and additionalDataPools for cephfs

The dataPoolSpec field is added to allow specifying the poolSpec for the
default cephfs data pool. The additionalDataPools field is added to allow
specifying additional named cephfs data pools. If any value is not specified
for a pool, we use the default.

Signed-off-by: Malay Kumar Parida
---
 api/v1/storagecluster_types.go               |  4 ++
 controllers/storagecluster/cephfilesystem.go | 43 +++++++++++++++++---
 2 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/api/v1/storagecluster_types.go b/api/v1/storagecluster_types.go
index d96fc775d6..2084767965 100644
--- a/api/v1/storagecluster_types.go
+++ b/api/v1/storagecluster_types.go
@@ -237,6 +237,10 @@ type ManageCephFilesystems struct {
 	// +kubebuilder:validation:MaxLength=253
 	// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
 	StorageClassName string `json:"storageClassName,omitempty"`
+	// DataPoolSpec specifies the pool specification for the default cephfs data pool
+	DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
+	// AdditionalDataPools specifies list of additional named cephfs data pools
+	AdditionalDataPools []rookCephv1.NamedPoolSpec `json:"additionalDataPools,omitempty"`
 }
 
 // ManageCephObjectStores defines how to reconcile CephObjectStores
diff --git a/controllers/storagecluster/cephfilesystem.go b/controllers/storagecluster/cephfilesystem.go
index ebe6a3eaed..2137c54844 100644
--- a/controllers/storagecluster/cephfilesystem.go
+++ b/controllers/storagecluster/cephfilesystem.go
@@ -51,15 +51,39 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
 		// standalone deployment that isn't in provider cluster will not
 		// have storageProfile, we need to define default dataPool, if
 		// storageProfile is set this will be overridden.
+		// Use the specified poolSpec; if it is unset, the default poolSpec is used
 		ret.Spec.DataPools = []cephv1.NamedPoolSpec{
 			{
-				PoolSpec: cephv1.PoolSpec{
-					DeviceClass:   generateDeviceClass(initStorageCluster),
-					Replicated:    generateCephReplicatedSpec(initStorageCluster, "data"),
-					FailureDomain: initStorageCluster.Status.FailureDomain,
-				},
+				PoolSpec: initStorageCluster.Spec.ManagedResources.CephFilesystems.DataPoolSpec,
 			},
 		}
+
+		// Append the pools listed in additionalDataPools
+		ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)
+
+		// Iterate over each pool and set default values if necessary
+		defaultPoolSpec := generateDefaultPoolSpec(initStorageCluster)
+		for i := range ret.Spec.DataPools {
+			pool := &ret.Spec.DataPools[i]
+			// Set default device class if not specified
+			if pool.PoolSpec.DeviceClass == "" {
+				pool.PoolSpec.DeviceClass = defaultPoolSpec.DeviceClass
+			}
+			// Set default replication settings if not specified
+			if pool.PoolSpec.Replicated.Size == 0 {
+				pool.PoolSpec.Replicated.Size = defaultPoolSpec.Replicated.Size
+			}
+			if pool.PoolSpec.Replicated.ReplicasPerFailureDomain == 0 {
+				pool.PoolSpec.Replicated.ReplicasPerFailureDomain = defaultPoolSpec.Replicated.ReplicasPerFailureDomain
+			}
+			if pool.PoolSpec.Replicated.TargetSizeRatio == 0 {
+				pool.PoolSpec.Replicated.TargetSizeRatio = defaultPoolSpec.Replicated.TargetSizeRatio
+			}
+			// Set default failure domain if not specified
+			if pool.PoolSpec.FailureDomain == "" {
+				pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
+			}
+		}
 	} else {
 		// Load all StorageProfile objects in the StorageCluster's namespace
 		storageProfiles := &ocsv1.StorageProfileList{}
@@ -295,3 +319,12 @@ func getActiveMetadataServers(sc *ocsv1.StorageCluster) int {
 
 	return defaults.CephFSActiveMetadataServers
 }
+
+// generateDefaultPoolSpec returns the default pool specification for cephfs data pools
+func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
+	return cephv1.PoolSpec{
+		DeviceClass:   generateDeviceClass(sc),
+		Replicated:    generateCephReplicatedSpec(sc, "data"),
+		FailureDomain: sc.Status.FailureDomain,
+	}
+}

From 9eb2a29cd2eea5189776efd26f2e468eefe41687 Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Mon, 18 Mar 2024 16:05:38 +0530
Subject: [PATCH 2/4] Silence controller-gen error regarding float type usage as dangerous

Signed-off-by: Malay Kumar Parida
---
 Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Makefile b/Makefile
index 3e9af0e720..e3b9fe45fd 100644
--- a/Makefile
+++ b/Makefile
@@ -134,7 +134,7 @@ generate: controller-gen
 # Generate manifests e.g. CRD, RBAC etc.
 manifests: controller-gen
 	@echo Updating generated manifests
-	$(CONTROLLER_GEN) rbac:roleName=manager-role crd:generateEmbeddedObjectMeta=true paths=./api/... webhook paths="./..." output:crd:artifacts:config=config/crd/bases
+	$(CONTROLLER_GEN) rbac:roleName=manager-role crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true paths=./api/... webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases verify-deps: deps-update @echo "Verifying dependency files" From 52ee853b60cab53e0a97a754922d2e99d664e272 Mon Sep 17 00:00:00 2001 From: Malay Kumar Parida Date: Mon, 18 Mar 2024 16:14:17 +0530 Subject: [PATCH 3/4] Add autogenerated changes make gen-latest-csv && make update-generated && make deps-update Signed-off-by: Malay Kumar Parida --- api/v1/zz_generated.deepcopy.go | 10 +- .../ocs.openshift.io_storageclusters.yaml | 434 ++++++++++++++++++ .../ocs/ocs.openshift.io_storageclusters.yaml | 434 ++++++++++++++++++ .../manifests/storagecluster.crd.yaml | 434 ++++++++++++++++++ .../api/v4/v1/storagecluster_types.go | 4 + .../api/v4/v1/zz_generated.deepcopy.go | 10 +- 6 files changed, 1324 insertions(+), 2 deletions(-) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 621ae5c27c..63f517184a 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -320,6 +320,14 @@ func (in *ManageCephDashboard) DeepCopy() *ManageCephDashboard { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ManageCephFilesystems) DeepCopyInto(out *ManageCephFilesystems) { *out = *in + in.DataPoolSpec.DeepCopyInto(&out.DataPoolSpec) + if in.AdditionalDataPools != nil { + in, out := &in.AdditionalDataPools, &out.AdditionalDataPools + *out = make([]ceph_rook_iov1.NamedPoolSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephFilesystems. @@ -422,7 +430,7 @@ func (in *ManagedResourcesSpec) DeepCopyInto(out *ManagedResourcesSpec) { out.CephDashboard = in.CephDashboard out.CephBlockPools = in.CephBlockPools in.CephNonResilientPools.DeepCopyInto(&out.CephNonResilientPools) - out.CephFilesystems = in.CephFilesystems + in.CephFilesystems.DeepCopyInto(&out.CephFilesystems) in.CephObjectStores.DeepCopyInto(&out.CephObjectStores) out.CephObjectStoreUsers = in.CephObjectStoreUsers out.CephToolbox = in.CephToolbox diff --git a/config/crd/bases/ocs.openshift.io_storageclusters.yaml b/config/crd/bases/ocs.openshift.io_storageclusters.yaml index 998f47c8c5..15d453f9c1 100644 --- a/config/crd/bases/ocs.openshift.io_storageclusters.yaml +++ b/config/crd/bases/ocs.openshift.io_storageclusters.yaml @@ -760,6 +760,440 @@ spec: properties: activeMetadataServers: type: integer + additionalDataPools: + description: AdditionalDataPools specifies list of additional + named cephfs data pools + items: + description: NamedPoolSpec represents the named ceph pool + spec + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set + a default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to + for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object + in an erasure coded storage pool (required for + erasure-coded pool type). This is the number of + OSDs that can be lost simultaneously before data + cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the + number of data chunks, the higher the cost of + recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in + the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either + pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling + of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, + only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + name: + description: Name of the pool + type: string + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in + objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) + for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents + low performance tier (for example HDDs) for + remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number + of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object + in a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to + Ceph in terms of expected consumption of the total + cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like + 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: array + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephfs data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set a + default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). This is the number of OSDs that can + be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an + erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number + of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableSnapshotClass: type: boolean disableStorageClass: diff --git a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml index 998f47c8c5..15d453f9c1 100644 --- a/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml +++ b/deploy/csv-templates/crds/ocs/ocs.openshift.io_storageclusters.yaml @@ -760,6 +760,440 @@ spec: properties: activeMetadataServers: type: integer + additionalDataPools: + description: AdditionalDataPools specifies list of additional + named cephfs data pools + items: + description: NamedPoolSpec represents the named ceph pool + spec + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set + a default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to + for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object + in an erasure coded storage pool (required for + erasure-coded pool type). This is the number of + OSDs that can be lost simultaneously before data + cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the + number of data chunks, the higher the cost of + recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in + the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either + pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling + of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, + only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + name: + description: Name of the pool + type: string + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in + objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) + for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents + low performance tier (for example HDDs) for + remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number + of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object + in a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to + Ceph in terms of expected consumption of the total + cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like + 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: array + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephfs data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set a + default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). This is the number of OSDs that can + be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an + erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number + of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableSnapshotClass: type: boolean disableStorageClass: diff --git a/deploy/ocs-operator/manifests/storagecluster.crd.yaml b/deploy/ocs-operator/manifests/storagecluster.crd.yaml index acf33c7356..faec9c38e4 100644 --- a/deploy/ocs-operator/manifests/storagecluster.crd.yaml +++ b/deploy/ocs-operator/manifests/storagecluster.crd.yaml @@ -759,6 +759,440 @@ spec: properties: activeMetadataServers: type: integer + additionalDataPools: + description: AdditionalDataPools specifies list of additional + named cephfs data pools + items: + description: NamedPoolSpec represents the named ceph pool + spec + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set + a default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to + for use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object + in an erasure coded storage pool (required for + erasure-coded pool type). This is the number of + OSDs that can be lost simultaneously before data + cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the + number of data chunks, the higher the cost of + recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in + the crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either + pool or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling + of snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, + only valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + name: + description: Name of the pool + type: string + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in + objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) + for Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents + low performance tier (for example HDDs) for + remaining OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number + of replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object + in a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to + Ceph in terms of expected consumption of the total + cluster capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like + 60s for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + type: array + dataPoolSpec: + description: DataPoolSpec specifies the pool specification + for the default cephfs data pool + properties: + application: + description: The application name to set on the pool. + Only expected to be set for rgw pools. 
+ type: string + compressionMode: + description: 'DEPRECATED: use Parameters instead, e.g., + Parameters["compression_mode"] = "force" The inline + compression mode in Bluestore OSD to set to (options + are: none, passive, aggressive, force) Do NOT set a + default value for kubebuilder as this will override + the Parameters' + enum: + - none + - passive + - aggressive + - force + - "" + nullable: true + type: string + crushRoot: + description: The root of the crush hierarchy utilized + by the pool + nullable: true + type: string + deviceClass: + description: The device class the OSD should set to for + use in the pool + nullable: true + type: string + enableRBDStats: + description: EnableRBDStats is used to enable gathering + of statistics for all RBD images in the pool + type: boolean + erasureCoded: + description: The erasure code settings + properties: + algorithm: + description: The algorithm for erasure coding + type: string + codingChunks: + description: Number of coding chunks per object in + an erasure coded storage pool (required for erasure-coded + pool type). This is the number of OSDs that can + be lost simultaneously before data cannot be recovered. + minimum: 0 + type: integer + dataChunks: + description: Number of data chunks per object in an + erasure coded storage pool (required for erasure-coded + pool type). The number of chunks required to recover + an object when any single OSD is lost is the same + as dataChunks so be aware that the larger the number + of data chunks, the higher the cost of recovery. + minimum: 0 + type: integer + required: + - codingChunks + - dataChunks + type: object + failureDomain: + description: 'The failure domain: osd/host/(region or + zone if available) - technically also any type in the + crush map' + type: string + mirroring: + description: The mirroring settings + properties: + enabled: + description: Enabled whether this pool is mirrored + or not + type: boolean + mode: + description: 'Mode is the mirroring mode: either pool + or image' + type: string + peers: + description: Peers represents the peers spec + nullable: true + properties: + secretNames: + description: SecretNames represents the Kubernetes + Secret names to add rbd-mirror or cephfs-mirror + peers + items: + type: string + type: array + type: object + snapshotSchedules: + description: SnapshotSchedules is the scheduling of + snapshot for mirrored images/pools + items: + description: SnapshotScheduleSpec represents the + snapshot scheduling settings of a mirrored pool + properties: + interval: + description: Interval represent the periodicity + of the snapshot. 
+ type: string + path: + description: Path is the path to snapshot, only + valid for CephFS + type: string + startTime: + description: StartTime indicates when to start + the snapshot + type: string + type: object + type: array + type: object + parameters: + additionalProperties: + type: string + description: Parameters is a list of properties to enable + on a given pool + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + quotas: + description: The quota settings + nullable: true + properties: + maxBytes: + description: MaxBytes represents the quota in bytes + Deprecated in favor of MaxSize + format: int64 + type: integer + maxObjects: + description: MaxObjects represents the quota in objects + format: int64 + type: integer + maxSize: + description: MaxSize represents the quota in bytes + as a string + pattern: ^[0-9]+[\.]?[0-9]*([KMGTPE]i|[kMGTPE])?$ + type: string + type: object + replicated: + description: The replication settings + properties: + hybridStorage: + description: HybridStorage represents hybrid storage + tier settings + nullable: true + properties: + primaryDeviceClass: + description: PrimaryDeviceClass represents high + performance tier (for example SSD or NVME) for + Primary OSD + minLength: 1 + type: string + secondaryDeviceClass: + description: SecondaryDeviceClass represents low + performance tier (for example HDDs) for remaining + OSDs + minLength: 1 + type: string + required: + - primaryDeviceClass + - secondaryDeviceClass + type: object + replicasPerFailureDomain: + description: ReplicasPerFailureDomain the number of + replica in the specified failure domain + minimum: 1 + type: integer + requireSafeReplicaSize: + description: RequireSafeReplicaSize if false allows + you to set replica 1 + type: boolean + size: + description: Size - Number of copies per object in + a replicated storage pool, including the object + itself (required for replicated pool type) + minimum: 0 + type: integer + subFailureDomain: + description: SubFailureDomain the name of the sub-failure + domain + type: string + targetSizeRatio: + description: TargetSizeRatio gives a hint (%) to Ceph + in terms of expected consumption of the total cluster + capacity + type: number + required: + - size + type: object + statusCheck: + description: The mirroring statusCheck + properties: + mirror: + description: HealthCheckSpec represents the health + check of an object store bucket + nullable: true + properties: + disabled: + type: boolean + interval: + description: Interval is the internal in second + or minute for the health check to run like 60s + for 60 seconds + type: string + timeout: + type: string + type: object + type: object + x-kubernetes-preserve-unknown-fields: true + type: object disableSnapshotClass: type: boolean disableStorageClass: diff --git a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go index d96fc775d6..2084767965 100644 --- a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go +++ b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/storagecluster_types.go @@ -237,6 +237,10 @@ type ManageCephFilesystems struct { // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ StorageClassName string `json:"storageClassName,omitempty"` + // DataPoolSpec specifies the pool specification for the default cephfs data pool + DataPoolSpec 
rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
+	// AdditionalDataPools specifies list of additional named cephfs data pools
+	AdditionalDataPools []rookCephv1.NamedPoolSpec `json:"additionalDataPools,omitempty"`
 }
 
 // ManageCephObjectStores defines how to reconcile CephObjectStores
diff --git a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go
index 621ae5c27c..63f517184a 100644
--- a/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/red-hat-storage/ocs-operator/api/v4/v1/zz_generated.deepcopy.go
@@ -320,6 +320,14 @@ func (in *ManageCephDashboard) DeepCopy() *ManageCephDashboard {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ManageCephFilesystems) DeepCopyInto(out *ManageCephFilesystems) {
 	*out = *in
+	in.DataPoolSpec.DeepCopyInto(&out.DataPoolSpec)
+	if in.AdditionalDataPools != nil {
+		in, out := &in.AdditionalDataPools, &out.AdditionalDataPools
+		*out = make([]ceph_rook_iov1.NamedPoolSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManageCephFilesystems.
@@ -422,7 +430,7 @@ func (in *ManagedResourcesSpec) DeepCopyInto(out *ManagedResourcesSpec) {
 	out.CephDashboard = in.CephDashboard
 	out.CephBlockPools = in.CephBlockPools
 	in.CephNonResilientPools.DeepCopyInto(&out.CephNonResilientPools)
-	out.CephFilesystems = in.CephFilesystems
+	in.CephFilesystems.DeepCopyInto(&out.CephFilesystems)
 	in.CephObjectStores.DeepCopyInto(&out.CephObjectStores)
 	out.CephObjectStoreUsers = in.CephObjectStoreUsers
 	out.CephToolbox = in.CephToolbox

From 41253f8f8ec0c1b0aa371d9cca0245175cede7ca Mon Sep 17 00:00:00 2001
From: Malay Kumar Parida
Date: Mon, 18 Mar 2024 19:10:14 +0530
Subject: [PATCH 4/4] Add unit tests for various scenarios of specifying data pools in cephfs

Signed-off-by: Malay Kumar Parida
---
 .../storagecluster/cephfilesystem_test.go | 225 ++++++++++++++++++
 1 file changed, 225 insertions(+)

diff --git a/controllers/storagecluster/cephfilesystem_test.go b/controllers/storagecluster/cephfilesystem_test.go
index bfbb0611ee..0f32f6e9b0 100644
--- a/controllers/storagecluster/cephfilesystem_test.go
+++ b/controllers/storagecluster/cephfilesystem_test.go
@@ -191,3 +191,228 @@ func TestGetActiveMetadataServers(t *testing.T) {
 	}
 }
+
+func TestCephFileSystemDataPools(t *testing.T) {
+	mocksc := &api.StorageCluster{}
+	mockStorageCluster.DeepCopyInto(mocksc)
+	mocksc.Status.FailureDomain = "zone"
+	defaultPoolSpec := generateDefaultPoolSpec(mocksc)
+	var cases = []struct {
+		label             string
+		sc                *api.StorageCluster
+		expectedDataPools []cephv1.NamedPoolSpec
+	}{
+		{
+			label: "Neither DataPoolSpec nor AdditionalDataPools is set",
+			sc:    &api.StorageCluster{},
+			expectedDataPools: []cephv1.NamedPoolSpec{
+				{
+					PoolSpec: defaultPoolSpec,
+				},
+			},
+		},
+		{
+			label: "DataPoolSpec is set & AdditionalDataPools is not set",
+			sc: &api.StorageCluster{
+				Spec: api.StorageClusterSpec{
+					ManagedResources: api.ManagedResourcesSpec{
+						CephFilesystems: api.ManageCephFilesystems{
+							DataPoolSpec: cephv1.PoolSpec{
+								DeviceClass: "gold",
+								Replicated: cephv1.ReplicatedSpec{
+									Size:            2,
+									TargetSizeRatio: 0.8,
+								},
+							},
+						},
+					},
+				},
+			},
+			expectedDataPools: []cephv1.NamedPoolSpec{
+				{
+					PoolSpec: cephv1.PoolSpec{
+						DeviceClass: "gold",
+						Replicated: 
cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.8, + ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain, + }, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + }, + }, + { + label: "DataPoolSpec is not set & One item is set on AdditionalDataPools", + sc: &api.StorageCluster{ + Spec: api.StorageClusterSpec{ + ManagedResources: api.ManagedResourcesSpec{ + CephFilesystems: api.ManageCephFilesystems{ + AdditionalDataPools: []cephv1.NamedPoolSpec{ + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.3, + }, + }, + }, + }, + }, + }, + }, + }, + expectedDataPools: []cephv1.NamedPoolSpec{ + { + PoolSpec: defaultPoolSpec, + }, + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: defaultPoolSpec.DeviceClass, + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.3, + ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain, + }, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + }, + }, + { + label: "DataPoolSpec is not set & multiple AdditionalDataPools are set", + sc: &api.StorageCluster{ + Spec: api.StorageClusterSpec{ + ManagedResources: api.ManagedResourcesSpec{ + CephFilesystems: api.ManageCephFilesystems{ + AdditionalDataPools: []cephv1.NamedPoolSpec{ + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "gold", + }, + }, + { + Name: "test-2", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "silver", + }, + }, + }, + }, + }, + }, + }, + expectedDataPools: []cephv1.NamedPoolSpec{ + { + PoolSpec: defaultPoolSpec, + }, + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "gold", + Replicated: defaultPoolSpec.Replicated, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + { + Name: "test-2", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "silver", + Replicated: defaultPoolSpec.Replicated, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + }, + }, + { + label: "DataPoolSpec is set & multiple AdditionalDataPools are set", + sc: &api.StorageCluster{ + Spec: api.StorageClusterSpec{ + ManagedResources: api.ManagedResourcesSpec{ + CephFilesystems: api.ManageCephFilesystems{ + DataPoolSpec: cephv1.PoolSpec{ + DeviceClass: "gold", + Replicated: cephv1.ReplicatedSpec{ + TargetSizeRatio: 0.1, + }, + }, + AdditionalDataPools: []cephv1.NamedPoolSpec{ + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "silver", + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.25, + }, + }, + }, + { + Name: "test-2", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "bronze", + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.25, + }, + }, + }, + }, + }, + }, + }, + }, + expectedDataPools: []cephv1.NamedPoolSpec{ + { + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "gold", + Replicated: cephv1.ReplicatedSpec{ + Size: defaultPoolSpec.Replicated.Size, + TargetSizeRatio: 0.1, + ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain, + }, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + { + Name: "test-1", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "silver", + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.25, + ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain, + }, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + { + Name: "test-2", + PoolSpec: cephv1.PoolSpec{ + DeviceClass: "bronze", + Replicated: cephv1.ReplicatedSpec{ + Size: 2, + TargetSizeRatio: 0.25, + ReplicasPerFailureDomain: 
defaultPoolSpec.Replicated.ReplicasPerFailureDomain, + }, + FailureDomain: defaultPoolSpec.FailureDomain, + }, + }, + }, + }, + } + + for _, c := range cases { + t.Logf("Case: %s\n", c.label) + var objects []client.Object + t, reconciler, _, _ := initStorageClusterResourceCreateUpdateTest(t, objects, nil) + c.sc.Status.FailureDomain = "zone" + filesystem, err := reconciler.newCephFilesystemInstances(c.sc) + assert.NoError(t, err) + actualDataPools := filesystem[0].Spec.DataPools + assert.Equal(t, c.expectedDataPools, actualDataPools) + } +}
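
For illustration only: with this series applied, a StorageCluster manifest can
exercise the new fields roughly as sketched below. The resource name,
namespace, device classes, and pool name are hypothetical, and the
managedResources/cephFilesystems nesting is assumed from the json tags on
ManagedResourcesSpec and ManageCephFilesystems. Any field left unset (for
example failureDomain or replicated.replicasPerFailureDomain) is filled in
from the defaults computed by generateDefaultPoolSpec in patch 1.

apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
  name: ocs-storagecluster       # hypothetical name
  namespace: openshift-storage   # hypothetical namespace
spec:
  managedResources:
    cephFilesystems:
      # Overrides for the default cephfs data pool; unset fields fall back
      # to the generated defaults (device class, replication, failure domain)
      dataPoolSpec:
        deviceClass: ssd
        replicated:
          size: 3
      # Additional named data pools, appended after the default pool
      additionalDataPools:
        - name: archive
          deviceClass: hdd
          replicated:
            size: 2
            targetSizeRatio: 0.25

The resulting CephFilesystem would carry two entries in spec.dataPools: the
default pool on the "ssd" device class and the "archive" pool, each completed
with the cluster's failure domain and remaining replication defaults. Note
that targetSizeRatio is a float, which is why patch 2 passes
allowDangerousTypes=true to controller-gen.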