Merge pull request #2494 from malayparida2000/cephfs_replica2
Add options for specifying dataPool and additionalDataPools for cephfs
openshift-merge-bot[bot] authored Mar 21, 2024
2 parents a0c4112 + 41253f8 commit 8f94626
Showing 10 changed files with 1,592 additions and 8 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -134,7 +134,7 @@ generate: controller-gen
# Generate manifests e.g. CRD, RBAC etc.
manifests: controller-gen
@echo Updating generated manifests
$(CONTROLLER_GEN) rbac:roleName=manager-role crd:generateEmbeddedObjectMeta=true paths=./api/... webhook paths="./..." output:crd:artifacts:config=config/crd/bases
$(CONTROLLER_GEN) rbac:roleName=manager-role crd:generateEmbeddedObjectMeta=true,allowDangerousTypes=true paths=./api/... webhook paths="./..." output:crd:artifacts:config=config/crd/bases

verify-deps: deps-update
@echo "Verifying dependency files"
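The added `allowDangerousTypes=true` option lets controller-gen generate CRD schemas for float fields, which it otherwise rejects; the Rook `PoolSpec` embedded into the StorageCluster API below carries floats such as `Replicated.TargetSizeRatio`. A minimal sketch of the kind of field that triggers this requirement (the package and type here are illustrative, not part of this PR):

```go
// Hypothetical type, shown only to illustrate why the flag is needed.
package v1

// ExamplePoolSettings contains a float field. Without
// crd:allowDangerousTypes=true, controller-gen refuses to generate a
// CRD schema for float32/float64 fields and fails the manifests target.
type ExamplePoolSettings struct {
	// +optional
	TargetSizeRatio float64 `json:"targetSizeRatio,omitempty"`
}
```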
4 changes: 4 additions & 0 deletions api/v1/storagecluster_types.go
@@ -237,6 +237,10 @@ type ManageCephFilesystems struct {
// +kubebuilder:validation:MaxLength=253
// +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
StorageClassName string `json:"storageClassName,omitempty"`
// DataPoolSpec specifies the pool specification for the default cephfs data pool
DataPoolSpec rookCephv1.PoolSpec `json:"dataPoolSpec,omitempty"`
// AdditionalDataPools specifies a list of additional named cephfs data pools
AdditionalDataPools []rookCephv1.NamedPoolSpec `json:"additionalDataPools,omitempty"`
}

// ManageCephObjectStores defines how to reconcile CephObjectStores
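The two new fields are surfaced under `spec.managedResources.cephFilesystems` of the StorageCluster. A rough sketch of populating them from Go (the import paths are assumptions and the values are arbitrary; fields left unset are defaulted later by the reconciler):

```go
package main

import (
	"fmt"

	api "github.com/red-hat-storage/ocs-operator/api/v4/v1" // import path assumed
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
)

func main() {
	sc := &api.StorageCluster{}
	sc.Spec.ManagedResources.CephFilesystems = api.ManageCephFilesystems{
		// Overrides for the default cephfs data pool; unset fields
		// (e.g. FailureDomain) are filled in by the operator.
		DataPoolSpec: cephv1.PoolSpec{
			DeviceClass: "ssd",
			Replicated:  cephv1.ReplicatedSpec{Size: 2},
		},
		// Extra named data pools created alongside the default one.
		AdditionalDataPools: []cephv1.NamedPoolSpec{
			{
				Name: "archive",
				PoolSpec: cephv1.PoolSpec{
					DeviceClass: "hdd",
					Replicated:  cephv1.ReplicatedSpec{Size: 3},
				},
			},
		},
	}
	fmt.Printf("%+v\n", sc.Spec.ManagedResources.CephFilesystems)
}
```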
10 changes: 9 additions & 1 deletion api/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

434 changes: 434 additions & 0 deletions config/crd/bases/ocs.openshift.io_storageclusters.yaml

Large diffs are not rendered by default.

43 changes: 38 additions & 5 deletions controllers/storagecluster/cephfilesystem.go
@@ -51,15 +51,39 @@ func (r *StorageClusterReconciler) newCephFilesystemInstances(initStorageCluster
// A standalone deployment that isn't in a provider cluster will not
// have a storageProfile, so we need to define a default dataPool; if a
// storageProfile is set, this will be overridden.
// Use the specified poolSpec; if it is unset, the default poolSpec will be used.
ret.Spec.DataPools = []cephv1.NamedPoolSpec{
{
PoolSpec: cephv1.PoolSpec{
DeviceClass: generateDeviceClass(initStorageCluster),
Replicated: generateCephReplicatedSpec(initStorageCluster, "data"),
FailureDomain: initStorageCluster.Status.FailureDomain,
},
PoolSpec: initStorageCluster.Spec.ManagedResources.CephFilesystems.DataPoolSpec,
},
}

// Append any additional data pools specified in the StorageCluster spec
ret.Spec.DataPools = append(ret.Spec.DataPools, initStorageCluster.Spec.ManagedResources.CephFilesystems.AdditionalDataPools...)

// Iterate over each pool and set default values if necessary
defaultPoolSpec := generateDefaultPoolSpec(initStorageCluster)
for i := range ret.Spec.DataPools {
pool := &ret.Spec.DataPools[i]
// Set default device class if not specified
if pool.PoolSpec.DeviceClass == "" {
pool.PoolSpec.DeviceClass = defaultPoolSpec.DeviceClass
}
// Set default replication settings if not specified
if pool.PoolSpec.Replicated.Size == 0 {
pool.PoolSpec.Replicated.Size = defaultPoolSpec.Replicated.Size
}
if pool.PoolSpec.Replicated.ReplicasPerFailureDomain == 0 {
pool.PoolSpec.Replicated.ReplicasPerFailureDomain = defaultPoolSpec.Replicated.ReplicasPerFailureDomain
}
if pool.PoolSpec.Replicated.TargetSizeRatio == 0 {
pool.PoolSpec.Replicated.TargetSizeRatio = defaultPoolSpec.Replicated.TargetSizeRatio
}
// Set default failure domain if not specified
if pool.PoolSpec.FailureDomain == "" {
pool.PoolSpec.FailureDomain = defaultPoolSpec.FailureDomain
}
}
} else {
// Load all StorageProfile objects in the StorageCluster's namespace
storageProfiles := &ocsv1.StorageProfileList{}
@@ -295,3 +319,12 @@ func getActiveMetadataServers(sc *ocsv1.StorageCluster) int {

return defaults.CephFSActiveMetadataServers
}

// generateDefaultPoolSpec generates the default pool specification for the cephfs data pools
func generateDefaultPoolSpec(sc *ocsv1.StorageCluster) cephv1.PoolSpec {
return cephv1.PoolSpec{
DeviceClass: generateDeviceClass(sc),
Replicated: generateCephReplicatedSpec(sc, "data"),
FailureDomain: sc.Status.FailureDomain,
}
}
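Net effect of this hunk: the hard-coded default data-pool spec is replaced by the user-supplied `DataPoolSpec`, any `AdditionalDataPools` are appended, and the loop then back-fills zero-valued fields from `generateDefaultPoolSpec`. A self-contained sketch of that defaulting pass, using simplified stand-in types instead of the real `cephv1.PoolSpec` so the snippet runs on its own:

```go
package main

import "fmt"

// Simplified stand-ins for the Rook pool types, used only to illustrate
// the defaulting pass above; the real code operates on cephv1.PoolSpec.
type replicated struct {
	Size                     uint
	ReplicasPerFailureDomain uint
	TargetSizeRatio          float64
}

type poolSpec struct {
	DeviceClass   string
	FailureDomain string
	Replicated    replicated
}

// fillDefaults mirrors the loop in newCephFilesystemInstances: any field
// still at its zero value is taken from the cluster-wide defaults.
func fillDefaults(p *poolSpec, def poolSpec) {
	if p.DeviceClass == "" {
		p.DeviceClass = def.DeviceClass
	}
	if p.Replicated.Size == 0 {
		p.Replicated.Size = def.Replicated.Size
	}
	if p.Replicated.ReplicasPerFailureDomain == 0 {
		p.Replicated.ReplicasPerFailureDomain = def.Replicated.ReplicasPerFailureDomain
	}
	if p.Replicated.TargetSizeRatio == 0 {
		p.Replicated.TargetSizeRatio = def.Replicated.TargetSizeRatio
	}
	if p.FailureDomain == "" {
		p.FailureDomain = def.FailureDomain
	}
}

func main() {
	def := poolSpec{
		DeviceClass:   "ssd",
		FailureDomain: "zone",
		Replicated:    replicated{Size: 3, ReplicasPerFailureDomain: 1, TargetSizeRatio: 0.49},
	}
	// The user only pinned the device class; everything else inherits defaults.
	user := poolSpec{DeviceClass: "gold"}
	fillDefaults(&user, def)
	fmt.Printf("%+v\n", user)
	// Prints: {DeviceClass:gold FailureDomain:zone Replicated:{Size:3 ReplicasPerFailureDomain:1 TargetSizeRatio:0.49}}
}
```

One consequence of keying off Go zero values is that an explicitly requested zero (for example `TargetSizeRatio: 0`) cannot be distinguished from "unset" and is overwritten with the default.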
225 changes: 225 additions & 0 deletions controllers/storagecluster/cephfilesystem_test.go
@@ -191,3 +191,228 @@ func TestGetActiveMetadataServers(t *testing.T) {
}

}

func TestCephFileSystemDataPools(t *testing.T) {
mocksc := &api.StorageCluster{}
mockStorageCluster.DeepCopyInto(mocksc)
mocksc.Status.FailureDomain = "zone"
defaultPoolSpec := generateDefaultPoolSpec(mocksc)
var cases = []struct {
label string
sc *api.StorageCluster
expectedDataPools []cephv1.NamedPoolSpec
}{
{
label: "Neither DataPoolSpec nor AdditionalDataPools is set",
sc: &api.StorageCluster{},
expectedDataPools: []cephv1.NamedPoolSpec{
{
PoolSpec: defaultPoolSpec,
},
},
},
{
label: "DataPoolSpec is set & AdditionalDataPools is not set",
sc: &api.StorageCluster{
Spec: api.StorageClusterSpec{
ManagedResources: api.ManagedResourcesSpec{
CephFilesystems: api.ManageCephFilesystems{
DataPoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.8,
},
},
},
},
},
},
expectedDataPools: []cephv1.NamedPoolSpec{
{
PoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.8,
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
},
},
{
label: "DataPoolSpec is not set & One item is set on AdditionalDataPools",
sc: &api.StorageCluster{
Spec: api.StorageClusterSpec{
ManagedResources: api.ManagedResourcesSpec{
CephFilesystems: api.ManageCephFilesystems{
AdditionalDataPools: []cephv1.NamedPoolSpec{
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.3,
},
},
},
},
},
},
},
},
expectedDataPools: []cephv1.NamedPoolSpec{
{
PoolSpec: defaultPoolSpec,
},
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
DeviceClass: defaultPoolSpec.DeviceClass,
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.3,
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
},
},
{
label: "DataPoolSpec is not set & multiple AdditionalDataPools are set",
sc: &api.StorageCluster{
Spec: api.StorageClusterSpec{
ManagedResources: api.ManagedResourcesSpec{
CephFilesystems: api.ManageCephFilesystems{
AdditionalDataPools: []cephv1.NamedPoolSpec{
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
},
},
{
Name: "test-2",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "silver",
},
},
},
},
},
},
},
expectedDataPools: []cephv1.NamedPoolSpec{
{
PoolSpec: defaultPoolSpec,
},
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
Replicated: defaultPoolSpec.Replicated,
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
{
Name: "test-2",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "silver",
Replicated: defaultPoolSpec.Replicated,
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
},
},
{
label: "DataPoolSpec is set & multiple AdditionalDataPools are set",
sc: &api.StorageCluster{
Spec: api.StorageClusterSpec{
ManagedResources: api.ManagedResourcesSpec{
CephFilesystems: api.ManageCephFilesystems{
DataPoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
Replicated: cephv1.ReplicatedSpec{
TargetSizeRatio: 0.1,
},
},
AdditionalDataPools: []cephv1.NamedPoolSpec{
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "silver",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.25,
},
},
},
{
Name: "test-2",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "bronze",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.25,
},
},
},
},
},
},
},
},
expectedDataPools: []cephv1.NamedPoolSpec{
{
PoolSpec: cephv1.PoolSpec{
DeviceClass: "gold",
Replicated: cephv1.ReplicatedSpec{
Size: defaultPoolSpec.Replicated.Size,
TargetSizeRatio: 0.1,
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
{
Name: "test-1",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "silver",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.25,
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
{
Name: "test-2",
PoolSpec: cephv1.PoolSpec{
DeviceClass: "bronze",
Replicated: cephv1.ReplicatedSpec{
Size: 2,
TargetSizeRatio: 0.25,
ReplicasPerFailureDomain: defaultPoolSpec.Replicated.ReplicasPerFailureDomain,
},
FailureDomain: defaultPoolSpec.FailureDomain,
},
},
},
},
}

for _, c := range cases {
t.Logf("Case: %s\n", c.label)
var objects []client.Object
t, reconciler, _, _ := initStorageClusterResourceCreateUpdateTest(t, objects, nil)
c.sc.Status.FailureDomain = "zone"
filesystem, err := reconciler.newCephFilesystemInstances(c.sc)
assert.NoError(t, err)
actualDataPools := filesystem[0].Spec.DataPools
assert.Equal(t, c.expectedDataPools, actualDataPools)
}
}