Skip to content

Commit

Permalink
Merge pull request #2606 from sp98/migrate-osds-to-bluestore
Browse files Browse the repository at this point in the history
Migrate osds to bluestore
  • Loading branch information
openshift-merge-bot[bot] authored Jul 22, 2024
2 parents 92a5407 + 263a4a9 commit 9200d55
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 142 deletions.
52 changes: 6 additions & 46 deletions controllers/storagecluster/cephcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,7 @@ const (
networkProvider = "multus"
publicNetworkSelectorKey = "public"
clusterNetworkSelectorKey = "cluster"
// DisasterRecoveryTargetAnnotation signifies that the cluster is intended to be used for Disaster Recovery
DisasterRecoveryTargetAnnotation = "ocs.openshift.io/clusterIsDisasterRecoveryTarget"
upmapReadBalancerMode = "upmap-read"
upmapReadBalancerMode = "upmap-read"
)

const (
Expand Down Expand Up @@ -269,9 +267,8 @@ func (obj *ocsCephCluster) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.
// Record actual Ceph container image version before attempting update
sc.Status.Images.Ceph.ActualImage = found.Spec.CephVersion.Image

// Allow migration of OSD to bluestore-rdr if RDR optimization annotation is added on an existing cluster.
// Prevent changing the bluestore-rdr settings if they are already applied in the existing ceph cluster.
cephCluster.Spec.Storage.Store = determineOSDStore(sc, cephCluster.Spec.Storage.Store, found.Spec.Storage.Store)
// Update OSD store to `bluestore`
cephCluster.Spec.Storage.Store = updateOSDStore(found.Spec.Storage.Store)

// Add it to the list of RelatedObjects if found
objectRef, err := reference.GetReference(r.Scheme, found)
Expand Down Expand Up @@ -419,11 +416,6 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, kmsConfigMap *co
MaxLogSize: &maxLogSize,
}

osdStore := getOSDStoreConfig(sc)
if osdStore.Type != "" {
reqLogger.Info("osd store settings", osdStore)
}

cephCluster := &rookCephv1.CephCluster{
ObjectMeta: metav1.ObjectMeta{
Name: generateNameForCephCluster(sc),
Expand Down Expand Up @@ -454,7 +446,6 @@ func newCephCluster(sc *ocsv1.StorageCluster, cephImage string, kmsConfigMap *co
},
Storage: rookCephv1.StorageScopeSpec{
StorageClassDeviceSets: newStorageClassDeviceSets(sc),
Store: osdStore,
FlappingRestartIntervalHours: 24,
FullRatio: sc.Spec.ManagedResources.CephCluster.FullRatio,
NearFullRatio: sc.Spec.ManagedResources.CephCluster.NearFullRatio,
Expand Down Expand Up @@ -1297,45 +1288,14 @@ func getIPFamilyConfig(c client.Client) (rookCephv1.IPFamilyType, bool, error) {
return rookCephv1.IPv4, false, nil
}

// getOSDStoreConfig builds the OSD backend-store setting for the CephCluster
// spec. Internal (non-external) clusters annotated for disaster-recovery
// optimization get the bluestore-rdr store type; every other cluster gets an
// empty OSDStore, leaving the choice to Rook's default.
func getOSDStoreConfig(sc *ocsv1.StorageCluster) rookCephv1.OSDStore {
	var store rookCephv1.OSDStore
	if !sc.Spec.ExternalStorage.Enable && optimizeDisasterRecovery(sc) {
		store.Type = string(rookCephv1.StoreTypeBlueStoreRDR)
	}
	return store
}

// optimizeDisasterRecovery reports whether the StorageCluster requests
// Regional-DR optimizations, i.e. it carries the
// DisasterRecoveryTargetAnnotation with the literal value "true".
// A missing annotation (or a nil annotation map) yields false.
func optimizeDisasterRecovery(sc *ocsv1.StorageCluster) bool {
	return sc.GetAnnotations()[DisasterRecoveryTargetAnnotation] == "true"
}

func determineOSDStore(sc *ocsv1.StorageCluster, newOSDStore, existingOSDStore rookCephv1.OSDStore) rookCephv1.OSDStore {
func updateOSDStore(existingOSDStore rookCephv1.OSDStore) rookCephv1.OSDStore {
if existingOSDStore.Type == string(rookCephv1.StoreTypeBlueStoreRDR) {
return existingOSDStore
} else if !sc.Spec.ExternalStorage.Enable && (isBluestore(existingOSDStore) && optimizeDisasterRecovery(sc)) {
return rookCephv1.OSDStore{
Type: string(rookCephv1.StoreTypeBlueStoreRDR),
Type: string(rookCephv1.StoreTypeBlueStore),
UpdateStore: "yes-really-update-store",
}
}

return newOSDStore
}

func isBluestore(store rookCephv1.OSDStore) bool {
if store.Type == string(rookCephv1.StoreTypeBlueStore) || store.Type == "" {
return true
}

return false
return existingOSDStore
}

func getOsdCount(sc *ocsv1.StorageCluster) int {
Expand Down
71 changes: 8 additions & 63 deletions controllers/storagecluster/cephcluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1475,82 +1475,27 @@ func TestGetIPFamilyConfig(t *testing.T) {
}
}

// TestCephClusterStoreType verifies how newCephCluster picks the OSD store
// type from the StorageCluster's DR-target annotation and external-storage
// flag. NOTE(review): the sub-tests share and mutate the same sc value, so
// their order matters.
func TestCephClusterStoreType(t *testing.T) {
sc := &ocsv1.StorageCluster{}

// No annotation, internal cluster: store type is left empty (Rook default).
t.Run("ensure no bluestore optimization", func(t *testing.T) {
actual := newCephCluster(sc, "", nil, log)
assert.Equal(t, "", actual.Spec.Storage.Store.Type)
})

// DR-target annotation set to "true" on an internal cluster selects the
// bluestore-rdr store.
t.Run("ensure bluestore optimization based on annotation for internal clusters", func(t *testing.T) {
annotations := map[string]string{
DisasterRecoveryTargetAnnotation: "true",
}
sc.Annotations = annotations
actual := newCephCluster(sc, "", nil, log)
assert.Equal(t, "bluestore-rdr", actual.Spec.Storage.Store.Type)
})

// External clusters never get the optimization, even with the annotation
// still present from the previous sub-test.
t.Run("ensure no bluestore optimization for external clusters", func(t *testing.T) {
sc.Spec.ExternalStorage.Enable = true
actual := newCephCluster(sc, "", nil, log)
assert.Equal(t, "", actual.Spec.Storage.Store.Type)
})
}

func TestEnsureRDROptmizations(t *testing.T) {
func TestEnsureRDRMigration(t *testing.T) {
testSkipPrometheusRules = true
sc := &ocsv1.StorageCluster{}
mockStorageCluster.DeepCopyInto(sc)
sc.Status.Images.Ceph = &ocsv1.ComponentImageStatus{}
sc.Annotations[DisasterRecoveryTargetAnnotation] = "true"
reconciler := createFakeStorageClusterReconciler(t, networkConfig)

// Ensure bluestore-rdr store type if RDR optimization annotation is added
var obj ocsCephCluster
_, err := obj.ensureCreated(&reconciler, sc)
assert.NilError(t, err)
actual := &rookCephv1.CephCluster{}
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: generateNameForCephClusterFromString(sc.Name), Namespace: sc.Namespace}, actual)
assert.NilError(t, err)
assert.Equal(t, string(rookCephv1.StoreTypeBlueStoreRDR), actual.Spec.Storage.Store.Type)
expected := newCephCluster(mockStorageCluster.DeepCopy(), "", nil, log)

// Ensure bluestoreRDR store is not overridden if required annotations are removed later on
testSkipPrometheusRules = true
delete(sc.Annotations, DisasterRecoveryTargetAnnotation)
_, err = obj.ensureCreated(&reconciler, sc)
expected.Spec.Storage.Store.Type = string(rookCephv1.StoreTypeBlueStoreRDR)
err := reconciler.Client.Create(context.TODO(), expected)
assert.NilError(t, err)
actual = &rookCephv1.CephCluster{}
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: generateNameForCephClusterFromString(sc.Name), Namespace: sc.Namespace}, actual)
assert.NilError(t, err)
assert.Equal(t, string(rookCephv1.StoreTypeBlueStoreRDR), actual.Spec.Storage.Store.Type)
}

func TestEnsureRDRMigration(t *testing.T) {
sc := &ocsv1.StorageCluster{}
mockStorageCluster.DeepCopyInto(sc)
sc.Status.Images.Ceph = &ocsv1.ComponentImageStatus{}
reconciler := createFakeStorageClusterReconciler(t, networkConfig)

// Ensure bluestore store type if RDR optimization annotation is not added
// Ensure bluestore-rdr store type is reset to bluestore
var obj ocsCephCluster
_, err := obj.ensureCreated(&reconciler, sc)
assert.NilError(t, err)
actual := &rookCephv1.CephCluster{}
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: generateNameForCephClusterFromString(sc.Name), Namespace: sc.Namespace}, actual)
assert.NilError(t, err)
assert.Equal(t, "", actual.Spec.Storage.Store.Type)
assert.Equal(t, "", actual.Spec.Storage.Store.UpdateStore)

// Ensure bluestoreRDR migration is set if RDR optimization annotation is added later on
testSkipPrometheusRules = true
sc.Annotations[DisasterRecoveryTargetAnnotation] = "true"
_, err = obj.ensureCreated(&reconciler, sc)
assert.NilError(t, err)
actual = &rookCephv1.CephCluster{}
actual := &rookCephv1.CephCluster{}
err = reconciler.Client.Get(context.TODO(), types.NamespacedName{Name: generateNameForCephClusterFromString(sc.Name), Namespace: sc.Namespace}, actual)
assert.NilError(t, err)
assert.Equal(t, string(rookCephv1.StoreTypeBlueStoreRDR), actual.Spec.Storage.Store.Type)
assert.Equal(t, string(rookCephv1.StoreTypeBlueStore), actual.Spec.Storage.Store.Type)
assert.Equal(t, "yes-really-update-store", actual.Spec.Storage.Store.UpdateStore)
}

Expand Down
33 changes: 0 additions & 33 deletions docs/design/disaster-recovery-optimization.md

This file was deleted.

0 comments on commit 9200d55

Please sign in to comment.