From 2019450ddefd7bb498dcf0abd08d4faa8529bdc1 Mon Sep 17 00:00:00 2001
From: raaizik <132667934+raaizik@users.noreply.github.com>
Date: Sun, 10 Mar 2024 13:11:55 +0200
Subject: [PATCH] ODF info CM: reconciler

Signed-off-by: raaizik <132667934+raaizik@users.noreply.github.com>
---
 controllers/storagecluster/clusterclaims.go   | 180 ++------------
 .../initialization_reconciler_test.go         |  24 +-
 controllers/storagecluster/odfinfoconfig.go   | 244 ++++++++++++++++++
 controllers/storagecluster/reconcile.go       |   4 +-
 .../storagecluster_controller.go              |   3 +-
 .../storagecluster_controller_test.go         |  62 ++++-
 go.mod                                        |   2 +-
 7 files changed, 349 insertions(+), 170 deletions(-)
 create mode 100644 controllers/storagecluster/odfinfoconfig.go

diff --git a/controllers/storagecluster/clusterclaims.go b/controllers/storagecluster/clusterclaims.go
index 7cad2a4a85..ad2e4af490 100644
--- a/controllers/storagecluster/clusterclaims.go
+++ b/controllers/storagecluster/clusterclaims.go
@@ -3,20 +3,13 @@ package storagecluster
 import (
 	"context"
 	"fmt"
-	"strconv"
-	"strings"
 
-	"github.com/go-logr/logr"
-	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
 	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
-	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
-	corev1 "k8s.io/api/core/v1"
 	extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/version"
 	"k8s.io/client-go/tools/clientcmd"
 	clusterclientv1alpha1 "open-cluster-management.io/api/client/cluster/clientset/versioned"
 	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
@@ -25,31 +18,22 @@ import (
 )
 
 const (
-	RookCephMonSecretName = "rook-ceph-mon"
-	FsidKey               = "fsid"
-	OdfOperatorNamePrefix = "odf-operator"
-	ClusterClaimCRDName   = "clusterclaims.cluster.open-cluster-management.io"
+	ClusterClaimCRDName = "clusterclaims.cluster.open-cluster-management.io"
 )
 
 var (
-	ClusterClaimGroup         = "odf"
-	OdfVersion                = fmt.Sprintf("version.%s.openshift.io", ClusterClaimGroup)
-	StorageSystemName         = fmt.Sprintf("storagesystemname.%s.openshift.io", ClusterClaimGroup)
-	StorageClusterName        = fmt.Sprintf("storageclustername.%s.openshift.io", ClusterClaimGroup)
-	StorageClusterCount       = fmt.Sprintf("count.storageclusters.%s.openshift.io", ClusterClaimGroup)
-	StorageClusterDROptimized = fmt.Sprintf("droptimized.%s.openshift.io", ClusterClaimGroup)
-	CephFsid                  = fmt.Sprintf("cephfsid.%s.openshift.io", ClusterClaimGroup)
+	ClusterClaimGroup = "odf"
+	OdfInfoConfig     = fmt.Sprintf("odfinfoconfig.%s.openshift.io", ClusterClaimGroup)
 )
 
 type ocsClusterClaim struct{}
 
 type ClusterClaimCreator struct {
-	Context             context.Context
-	Logger              logr.Logger
-	Client              client.Client
-	Values              map[string]string
-	StorageCluster      *ocsv1.StorageCluster
-	StorageClusterCount int
+	Context        context.Context
+	Logger         logr.Logger
+	Client         client.Client
+	Values         map[string]string
+	StorageCluster *ocsv1.StorageCluster
 }
 
 func doesClusterClaimCrdExist(ctx context.Context, client client.Client) (bool, error) {
@@ -83,43 +67,9 @@ func (obj *ocsClusterClaim) ensureCreated(r *StorageClusterReconciler, instance
 		StorageCluster: instance,
 	}
 
-	odfVersion, err := creator.getOdfVersion()
-	if err != nil {
-		r.Log.Error(err, "failed to get odf version for operator. retrying again")
-		return reconcile.Result{}, err
-	}
-
-	storageClusterCount := len(r.clusters.GetStorageClusters())
-
-	cephFsid, err := creator.getCephFsid()
-	if err != nil {
-		r.Log.Error(err, "failed to get ceph fsid from secret. retrying again")
-		return reconcile.Result{}, err
-	}
-
-	storageSystemName, err := creator.getStorageSystemName()
-	if err != nil {
-		r.Log.Error(err, "failed to get storagesystem name. retrying again")
-		return reconcile.Result{}, err
-	}
-
-	var isDROptimized = "false"
-	// Set isDROptmized to "false" in case of external clusters as we currently don't have to way to determine
-	// if external cluster OSDs are using bluestore-rdr
-	if !instance.Spec.ExternalStorage.Enable {
-		isDROptimized, err = creator.getIsDROptimized(r.serverVersion)
-		if err != nil {
-			r.Log.Error(err, "failed to get cephcluster status. retrying again")
-			return reconcile.Result{}, err
-		}
-	}
+	// Reuse the odf-info helper so the claim value always matches the
+	// actual ConfigMap data key.
+	odfConfigKeyName := (&odfInfoConfig{}).getOdfInfoKeyName(instance)
 
-	err = creator.setStorageClusterCount(strconv.Itoa(storageClusterCount)).
-		setStorageSystemName(storageSystemName).
-		setStorageClusterName(instance.Name).
-		setOdfVersion(odfVersion).
-		setCephFsid(cephFsid).
-		setDROptimized(isDROptimized).
+	err := creator.setOdfInfoConfigKey(odfConfigKeyName).
 		create()
 
 	return reconcile.Result{}, err
@@ -179,95 +129,12 @@ func (c *ClusterClaimCreator) create() error {
 	return nil
 }
 
-func (c *ClusterClaimCreator) getOdfVersion() (string, error) {
-	var csvs operatorsv1alpha1.ClusterServiceVersionList
-	err := c.Client.List(c.Context, &csvs, &client.ListOptions{Namespace: c.StorageCluster.Namespace})
-	if err != nil {
-		return "", err
-	}
-
-	for _, csv := range csvs.Items {
-		if strings.HasPrefix(csv.Name, OdfOperatorNamePrefix) {
-			return csv.Spec.Version.String(), nil
-		}
-	}
-
-	return "", fmt.Errorf("failed to find csv with prefix %q", OdfOperatorNamePrefix)
-}
-
-func (c *ClusterClaimCreator) getCephFsid() (string, error) {
-	var rookCephMonSecret corev1.Secret
-	err := c.Client.Get(c.Context, types.NamespacedName{Name: RookCephMonSecretName, Namespace: c.StorageCluster.Namespace}, &rookCephMonSecret)
-	if err != nil {
-		return "", err
-	}
-	if val, ok := rookCephMonSecret.Data[FsidKey]; ok {
-		return string(val), nil
-	}
-
-	return "", fmt.Errorf("failed to fetch ceph fsid from %q secret", RookCephMonSecretName)
-}
-
-func (c *ClusterClaimCreator) getIsDROptimized(serverVersion *version.Info) (string, error) {
-	var cephCluster rookCephv1.CephCluster
-	err := c.Client.Get(c.Context, types.NamespacedName{Name: generateNameForCephClusterFromString(c.StorageCluster.Name), Namespace: c.StorageCluster.Namespace}, &cephCluster)
-	if err != nil {
-		return "false", err
-	}
-	if cephCluster.Status.CephStorage == nil || cephCluster.Status.CephStorage.OSD.StoreType == nil {
-		return "false", fmt.Errorf("cephcluster status does not have OSD store information")
-	}
-	bluestorerdr, ok := cephCluster.Status.CephStorage.OSD.StoreType["bluestore-rdr"]
-	if !ok {
-		return "false", nil
-	}
-	total := getOsdCount(c.StorageCluster, serverVersion)
-	if bluestorerdr < total {
-		return "false", nil
-	}
-	return "true", nil
-}
-
-func (c *ClusterClaimCreator) setStorageClusterCount(count string) *ClusterClaimCreator {
-	c.Values[StorageClusterCount] = count
-	return c
-}
-
-func (c *ClusterClaimCreator) setStorageSystemName(name string) *ClusterClaimCreator {
-	c.Values[StorageSystemName] = fmt.Sprintf("%s/%s", name, c.StorageCluster.GetNamespace())
-	return c
-}
-
-func (c *ClusterClaimCreator) setOdfVersion(version string) *ClusterClaimCreator {
-	c.Values[OdfVersion] = version
-	return c
-}
-
-func (c *ClusterClaimCreator) setStorageClusterName(name string) *ClusterClaimCreator {
-	c.Values[StorageClusterName] = fmt.Sprintf("%s/%s", name, c.StorageCluster.GetNamespace())
-	return c
-}
-
-func (c *ClusterClaimCreator) setCephFsid(fsid string) *ClusterClaimCreator {
-	c.Values[CephFsid] = fsid
-	return c
-}
-
-func (c *ClusterClaimCreator) setDROptimized(optimized string) *ClusterClaimCreator {
+func (c *ClusterClaimCreator) setOdfInfoConfigKey(name string) *ClusterClaimCreator {
-	c.Values[StorageClusterDROptimized] = optimized
+	c.Values[OdfInfoConfig] = name
 	return c
 }
 
-func (c *ClusterClaimCreator) getStorageSystemName() (string, error) {
-	for _, ref := range c.StorageCluster.OwnerReferences {
-		if ref.Kind == "StorageSystem" {
-			return ref.Name, nil
-		}
-	}
-
-	return "", fmt.Errorf("failed to find parent StorageSystem's name in StorageCluster %q ownerreferences", c.StorageCluster.Name)
-}
-
 func (obj *ocsClusterClaim) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
 	r.Log.Info("deleting ClusterClaim resources")
 	ctx := context.TODO()
@@ -277,20 +144,15 @@ func (obj *ocsClusterClaim) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.
 		}
 		return reconcile.Result{}, nil
 	}
-	names := []string{OdfVersion, StorageSystemName, StorageClusterName, CephFsid}
-	for _, name := range names {
-		cc := clusterv1alpha1.ClusterClaim{
-			ObjectMeta: metav1.ObjectMeta{
-				Name: name,
-			},
-		}
-		err := r.Client.Delete(context.TODO(), &cc)
-		if errors.IsNotFound(err) {
-			continue
-		} else if err != nil {
-			r.Log.Error(err, "failed to delete ClusterClaim", "ClusterClaim", cc.Name)
-			return reconcile.Result{}, fmt.Errorf("failed to delete %v: %v", cc.Name, err)
-		}
+	cc := clusterv1alpha1.ClusterClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: OdfInfoConfig,
+		},
+	}
+	err := r.Client.Delete(context.TODO(), &cc)
+	if err != nil && !errors.IsNotFound(err) {
+		r.Log.Error(err, "failed to delete ClusterClaim", "ClusterClaim", cc.Name)
+		return reconcile.Result{}, fmt.Errorf("failed to delete %v: %v", cc.Name, err)
 	}
 
 	return reconcile.Result{}, nil
diff --git a/controllers/storagecluster/initialization_reconciler_test.go b/controllers/storagecluster/initialization_reconciler_test.go
index 68b935b823..d25979dcb8 100644
--- a/controllers/storagecluster/initialization_reconciler_test.go
+++ b/controllers/storagecluster/initialization_reconciler_test.go
@@ -2,6 +2,11 @@ package storagecluster
 
 import (
 	"context"
+	"fmt"
+	"github.com/blang/semver/v4"
+	version2 "github.com/operator-framework/api/pkg/lib/version"
+	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+	version3 "github.com/red-hat-storage/ocs-operator/v4/version"
 	"os"
 	"testing"
 
@@ -350,6 +355,23 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
 			Phase: cephv1.ConditionType(util.PhaseReady),
 		},
 	}
+	verOdf, _ := semver.Make(getSemVer(version3.Version, 1, true))
+	csv := &operatorsv1alpha1.ClusterServiceVersion{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("odf-operator-%s", sc.Name),
+			Namespace: sc.Namespace,
+		},
+		Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
+			Version: version2.OperatorVersion{Version: verOdf},
+		},
+	}
+
+	rookCephMonSecret := &v1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "rook-ceph-mon", Namespace: sc.Namespace},
+		Data: map[string][]byte{
+			"fsid": []byte("b88c2d78-9de9-4227-9313-a63f62f78743"),
+		},
+	}
 	statusSubresourceObjs := []client.Object{sc}
 
 	var runtimeObjects []runtime.Object
@@ -364,7 +386,7 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
 		}
 	}
 
-	runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig)
+	runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig, rookCephMonSecret, csv)
 	client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjects...).WithStatusSubresource(statusSubresourceObjs...).Build()
 
 	return StorageClusterReconciler{
diff --git a/controllers/storagecluster/odfinfoconfig.go b/controllers/storagecluster/odfinfoconfig.go
new file mode 100644
index 0000000000..ce874667e5
--- /dev/null
+++ b/controllers/storagecluster/odfinfoconfig.go
@@ -0,0 +1,244 @@
+package storagecluster
+
+import (
+	"fmt"
+	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
+	ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
+	"github.com/red-hat-storage/ocs-operator/v4/controllers/util"
+	rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog/v2"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	"sigs.k8s.io/yaml"
+	"strings"
+	"sync"
+)
+
+// Note: sigs.k8s.io/yaml marshals via encoding/json, so these structs need
+// `json` tags; `yaml` tags would be silently ignored.
+type ConnectedClient struct {
+	NamespacedName types.NamespacedName `json:"metadata"`
+	ClusterID      string               `json:"clusterId"`
+}
+type InfoStorageCluster struct {
+	NamespacedName          types.NamespacedName `json:"metadata"`
+	StorageProviderEndpoint string               `json:"storageProviderEndpoint"`
+	CephClusterFSID         string               `json:"cephClusterFSID"`
+}
+
+type OdfInfoData struct {
+	OdfVersion          string             `json:"odfVersion"`
+	OdfDeploymentType   string             `json:"odfDeploymentType"`
+	Clients             []ConnectedClient  `json:"clients"`
+	StorageCluster      InfoStorageCluster `json:"storageCluster"`
+	StorageClusterCount int                `json:"storageClusterCount"`
+	StorageSystemName   string             `json:"storageSystemName"`
+	IsDROptimized       bool               `json:"isDROptimized"`
+}
+
+const (
+	OdfInfoKeyName            = "config.yaml"
+	OdfDeploymentTypeExternal = "external"
+	OdfDeploymentTypeInternal = "internal"
+	RookCephMonSecretName     = "rook-ceph-mon"
+	FsidKey                   = "fsid"
+	OdfOperatorNamePrefix     = "odf-operator"
+	OdfInfoConfigMapName      = "odf-info"
+	OdfInfoMapKind            = "ConfigMap"
+)
+
+type odfInfoConfig struct {
+	mutex sync.RWMutex
+}
+
+// ensureCreated ensures that a ConfigMap resource exists with its Data in
+// the desired state.
+func (obj *odfInfoConfig) ensureCreated(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {
+	operatorNamespace, err := util.GetOperatorNamespace()
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+
+	odfInfoConfigMap := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      OdfInfoConfigMapName,
+			Namespace: operatorNamespace,
+		},
+	}
+
+	_, err = ctrl.CreateOrUpdate(r.ctx, r.Client, odfInfoConfigMap, func() error {
+		if err := ctrl.SetControllerReference(sc, odfInfoConfigMap, r.Scheme); err != nil {
+			return err
+		}
+		r.Log.Info("Creating or updating odf-info configmap", OdfInfoMapKind, klog.KRef(sc.Namespace, OdfInfoConfigMapName))
+		obj.mutex.Lock()
+		defer obj.mutex.Unlock()
+		odfInfoData, configErr := getOdfInfoData(r, sc)
+		if configErr != nil {
+			return fmt.Errorf("failed to get ODF info config data: %w", configErr)
+		}
+		odfInfoKey := obj.getOdfInfoKeyName(sc)
+		// Initialize the data map on first creation, then set (or overwrite)
+		// this StorageCluster's key.
+		if odfInfoConfigMap.Data == nil {
+			odfInfoConfigMap.Data = map[string]string{}
+		}
+		odfInfoConfigMap.Data[odfInfoKey] = odfInfoData
+		return nil
+	})
+	if err != nil {
+		r.Log.Error(err, "failed to create or update odf-info config", OdfInfoMapKind, klog.KRef(sc.Namespace, OdfInfoConfigMapName))
+		return reconcile.Result{}, fmt.Errorf("failed to create or update odf-info config: %w", err)
+	}
+	return reconcile.Result{}, nil
+}
+
+func (obj *odfInfoConfig) getOdfInfoKeyName(sc *ocsv1.StorageCluster) string {
+	// ConfigMap data keys may only contain alphanumerics, '-', '_' and '.',
+	// so namespace and name are joined with '_' rather than '/'.
+	return sc.Namespace + "_" + sc.Name + "." + OdfInfoKeyName
+}
+
+// ensureDeleted removes this StorageCluster's key from the odf-info
+// ConfigMap, and deletes the ConfigMap itself once no other keys remain.
+func (obj *odfInfoConfig) ensureDeleted(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (reconcile.Result, error) {
+	operatorNamespace, err := util.GetOperatorNamespace()
+	if err != nil {
+		return reconcile.Result{}, err
+	}
+	var odfInfoConfigMap corev1.ConfigMap
+	if err = r.Client.Get(r.ctx, types.NamespacedName{Name: OdfInfoConfigMapName, Namespace: operatorNamespace}, &odfInfoConfigMap); err != nil {
+		if errors.IsNotFound(err) {
+			return reconcile.Result{}, nil
+		}
+		return reconcile.Result{}, err
+	}
+
+	obj.mutex.Lock()
+	defer obj.mutex.Unlock()
+	if len(odfInfoConfigMap.Data) > 1 {
+		delete(odfInfoConfigMap.Data, obj.getOdfInfoKeyName(sc))
+		if err = r.Client.Update(r.ctx, &odfInfoConfigMap); err != nil {
+			r.Log.Error(err, "Failed to update odf-info.", "ConfigMap", klog.KRef(odfInfoConfigMap.Namespace, odfInfoConfigMap.Name))
+			return reconcile.Result{}, fmt.Errorf("failed to update odf-info %v: %v", odfInfoConfigMap.Name, err)
+		}
+	} else {
+		if err = r.Client.Delete(r.ctx, &odfInfoConfigMap); err != nil {
+			r.Log.Error(err, "Failed to delete odf-info.", "ConfigMap", klog.KRef(odfInfoConfigMap.Namespace, odfInfoConfigMap.Name))
+			return reconcile.Result{}, fmt.Errorf("failed to delete odf-info %v: %v", odfInfoConfigMap.Name, err)
+		}
+	}
+	return reconcile.Result{}, nil
+}
+
+func getOdfInfoData(r *StorageClusterReconciler, sc *ocsv1.StorageCluster) (string, error) {
+	var odfVersion, cephFSId string
+	var err error
+	if odfVersion, err = getOdfVersion(r, sc); err != nil {
+		return "", err
+	}
+	if cephFSId, err = getCephFsid(r, sc); err != nil {
+		return "", err
+	}
+	var odfDeploymentType string
+	if sc.Spec.ExternalStorage.Enable {
+		odfDeploymentType = OdfDeploymentTypeExternal
+	} else {
+		odfDeploymentType = OdfDeploymentTypeInternal
+	}
+	var isDROptimized = false
+	// isDROptimized stays false for external clusters, as we currently have
+	// no way to determine whether external cluster OSDs use bluestore-rdr.
+	if !sc.Spec.ExternalStorage.Enable {
+		if isDROptimized, err = getIsDROptimized(r, sc); err != nil {
+			return "", err
+		}
+	}
+	var storageSystemName string
+	if storageSystemName, err = getStorageSystemName(sc); err != nil {
+		return "", err
+	}
+	storageClusterCount := len(r.clusters.GetStorageClusters())
+
+	var data = OdfInfoData{
+		OdfVersion:        odfVersion,
+		OdfDeploymentType: odfDeploymentType,
+		StorageClusterCount: storageClusterCount,
+		IsDROptimized:       isDROptimized,
+		StorageSystemName:   storageSystemName,
+		// The Clients array is populated from onboarding requests handled in server.go.
+		Clients: []ConnectedClient{},
+		StorageCluster: InfoStorageCluster{
+			NamespacedName:          types.NamespacedName{Name: sc.Name, Namespace: sc.Namespace},
+			StorageProviderEndpoint: sc.Status.StorageProviderEndpoint,
+			CephClusterFSID:         cephFSId,
+		},
+	}
+	yamlData, err := yaml.Marshal(data)
+	if err != nil {
+		return "", err
+	}
+	return string(yamlData), nil
+}
+
+func getStorageSystemName(storageCluster *ocsv1.StorageCluster) (string, error) {
+	for i := range storageCluster.OwnerReferences {
+		ref := &storageCluster.OwnerReferences[i]
+		if ref.Kind == "StorageSystem" {
+			return ref.Name, nil
+		}
+	}
+
+	return "", fmt.Errorf("failed to find parent StorageSystem's name in StorageCluster %q owner references: %v", storageCluster.Name, storageCluster.OwnerReferences)
+}
+
+func getIsDROptimized(r *StorageClusterReconciler, storageCluster *ocsv1.StorageCluster) (bool, error) {
+	var cephCluster rookCephv1.CephCluster
+	err := r.Client.Get(r.ctx, types.NamespacedName{Name: generateNameForCephClusterFromString(storageCluster.Name), Namespace: storageCluster.Namespace}, &cephCluster)
+	if err != nil {
+		return false, err
+	}
+	if cephCluster.Status.CephStorage == nil || cephCluster.Status.CephStorage.OSD.StoreType == nil {
+		return false, fmt.Errorf("cephcluster %q (for storagecluster %q) status does not have OSD store information", cephCluster.Name, storageCluster.Name)
+	}
+	bluestorerdr, ok := cephCluster.Status.CephStorage.OSD.StoreType["bluestore-rdr"]
+	if !ok {
+		return false, nil
+	}
+	total := getOsdCount(storageCluster, r.serverVersion)
+	if bluestorerdr < total {
+		return false, nil
+	}
+	return true, nil
+}
+
+func getOdfVersion(r *StorageClusterReconciler, storageCluster *ocsv1.StorageCluster) (string, error) {
+	var csvs operatorsv1alpha1.ClusterServiceVersionList
+	err := r.Client.List(r.ctx, &csvs, client.InNamespace(storageCluster.Namespace))
+	if err != nil {
+		return "", err
+	}
+	for _, csv := range csvs.Items {
+		if strings.HasPrefix(csv.Name, OdfOperatorNamePrefix) {
+			return csv.Spec.Version.String(), nil
+		}
+	}
+
+	return "", fmt.Errorf("failed to find csv with prefix %q", OdfOperatorNamePrefix)
+}
+
+func getCephFsid(r *StorageClusterReconciler, storageCluster *ocsv1.StorageCluster) (string, error) {
+	var rookCephMonSecret corev1.Secret
+	if err := r.Client.Get(r.ctx, types.NamespacedName{Name: RookCephMonSecretName, Namespace: storageCluster.Namespace}, &rookCephMonSecret); err != nil {
+		return "", err
+	}
+	if val, ok := rookCephMonSecret.Data[FsidKey]; ok {
+		return string(val), nil
+	}
+
+	return "", fmt.Errorf("failed to fetch ceph fsid from %q secret", RookCephMonSecretName)
+}
diff --git a/controllers/storagecluster/reconcile.go b/controllers/storagecluster/reconcile.go
index 5ae001c94e..b4d48d6d5d 100644
--- a/controllers/storagecluster/reconcile.go
+++ b/controllers/storagecluster/reconcile.go
@@ -409,7 +409,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsSnapshotClass{},
 			&ocsJobTemplates{},
 			&ocsCephRbdMirrors{},
-			&ocsClusterClaim{},
+			&odfInfoConfig{},
 		}
 	} else {
 		// noobaa-only ensure functions
@@ -427,7 +427,7 @@ func (r *StorageClusterReconciler) reconcilePhases(
 			&ocsCephCluster{},
 			&ocsSnapshotClass{},
 			&ocsNoobaaSystem{},
-			&ocsClusterClaim{},
+			&odfInfoConfig{},
 		}
 	}
 
diff --git a/controllers/storagecluster/storagecluster_controller.go b/controllers/storagecluster/storagecluster_controller.go
index 2bb2cf2322..b2e8b38f6f 100644
--- a/controllers/storagecluster/storagecluster_controller.go
+++ b/controllers/storagecluster/storagecluster_controller.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"reflect"
 
 	"github.com/go-logr/logr"
 	nbv1 "github.com/noobaa/noobaa-operator/v5/pkg/apis/noobaa/v1alpha1"
@@ -161,7 +162,7 @@ func (r *StorageClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
 			}
 			oldObj := e.ObjectOld.(*ocsv1alpha1.StorageConsumer)
 			newObj := e.ObjectNew.(*ocsv1alpha1.StorageConsumer)
-			return oldObj.Status.Client.OperatorVersion != newObj.Status.Client.OperatorVersion
+			return !reflect.DeepEqual(oldObj.Status.Client, newObj.Status.Client)
 		},
 	}
 
diff --git a/controllers/storagecluster/storagecluster_controller_test.go b/controllers/storagecluster/storagecluster_controller_test.go
index 79c2240e89..c838aa7436 100644
--- a/controllers/storagecluster/storagecluster_controller_test.go
+++ b/controllers/storagecluster/storagecluster_controller_test.go
@@ -3,6 +3,8 @@ package storagecluster
 import (
 	"context"
 	"fmt"
+	version2 "github.com/operator-framework/api/pkg/lib/version"
+	operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
 	"net"
 	"os"
 	"regexp"
@@ -69,6 +71,13 @@ var mockStorageCluster = &api.StorageCluster{
 			CleanupPolicyAnnotation: string(CleanupPolicyDelete),
 		},
 		Finalizers: []string{storageClusterFinalizer},
+		OwnerReferences: []metav1.OwnerReference{{
+			APIVersion: "v1",
+			Kind:       "StorageSystem",
+			Name:       "storage-test",
+			UID:        "asd2f",
+		},
+		},
 	},
 	Spec: api.StorageClusterSpec{
 		Monitoring: &api.MonitoringSpec{
@@ -96,10 +105,19 @@ var mockStorageClusterWithArbiter = &api.StorageCluster{
 }
 
 var mockCephCluster = &rookCephv1.CephCluster{
+	TypeMeta: metav1.TypeMeta{
+		Kind: "CephCluster",
+	},
 	ObjectMeta: metav1.ObjectMeta{
 		Name:      generateNameForCephCluster(mockStorageCluster.DeepCopy()),
 		Namespace: mockStorageCluster.Namespace,
 	},
+	Status: rookCephv1.ClusterStatus{
+		CephStorage: &rookCephv1.CephStorage{
+			DeviceClasses: []cephv1.DeviceClasses{{Name: DeviceTypeSSD}},
+			OSD:           rookCephv1.OSDStatus{StoreType: map[string]int{"bluestore-rdr": 1}},
+		},
+	},
 }
 
 var mockCephClusterNamespacedName = types.NamespacedName{
@@ -802,6 +820,8 @@ func TestNonWatchedReconcileWithNoCephClusterType(t *testing.T) {
 	mockNodeList.DeepCopyInto(nodeList)
 	infra := &configv1.Infrastructure{}
 	mockInfrastructure.DeepCopyInto(infra)
+	cc := &rookCephv1.CephCluster{}
+	mockCephCluster.DeepCopyInto(cc)
 	cr := &api.StorageCluster{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "storage-test",
@@ -814,13 +834,14 @@ func TestNonWatchedReconcileWithNoCephClusterType(t *testing.T) {
 		},
 	}
 
-	reconciler := createFakeStorageClusterReconciler(t, cr, nodeList, infra)
+	reconciler := createFakeStorageClusterReconciler(t, cr, cc, nodeList, infra)
 	result, err := reconciler.Reconcile(context.TODO(), mockStorageClusterRequest)
 	assert.NoError(t, err)
 	assert.Equal(t, reconcile.Result{}, result)
 }
 
 func TestNonWatchedReconcileWithTheCephClusterType(t *testing.T) {
+	testSkipPrometheusRules = true
 	nodeList := &corev1.NodeList{}
 	mockNodeList.DeepCopyInto(nodeList)
 	cc := &rookCephv1.CephCluster{}
@@ -858,8 +879,9 @@ func TestStorageDeviceSets(t *testing.T) {
 			"type": "gp2-csi",
 		},
 	}
-
-	reconciler := createFakeStorageClusterReconciler(t, storageClassEBS)
+	cc := &rookCephv1.CephCluster{}
+	mockCephCluster.DeepCopyInto(cc)
+	reconciler := createFakeStorageClusterReconciler(t, storageClassEBS, cc)
 
 	testcases := []struct {
 		label string
@@ -1023,7 +1045,9 @@ func TestStorageClusterFinalizer(t *testing.T) {
 			SelfLink: "/api/v1/namespaces/openshift-storage/noobaa/noobaa",
 		},
 	}
-	reconciler := createFakeStorageClusterReconciler(t, mockStorageCluster.DeepCopy(), noobaaMock.DeepCopy(), nodeList, infra, networkConfig)
+	cc := &rookCephv1.CephCluster{}
+	mockCephCluster.DeepCopyInto(cc)
+	reconciler := createFakeStorageClusterReconciler(t, mockStorageCluster.DeepCopy(), cc, noobaaMock.DeepCopy(), nodeList, infra, networkConfig)
 
 	result, err := reconciler.Reconcile(context.TODO(), mockStorageClusterRequest)
 	assert.NoError(t, err)
@@ -1122,7 +1146,23 @@ func createFakeStorageClusterReconciler(t *testing.T, obj ...runtime.Object) Sto
 			Phase: cephv1.ConditionType(util.PhaseReady),
 		},
 	}
-	obj = append(obj, cbp, cfs)
+	verOdf, _ := semver.Make(getSemVer(version.Version, 1, true))
+	csv := &operatorsv1alpha1.ClusterServiceVersion{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("odf-operator-%s", sc.Name),
+			Namespace: namespace,
+		},
+		Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
+			Version: version2.OperatorVersion{Version: verOdf},
+		},
+	}
+	rookCephMonSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{Name: "rook-ceph-mon", Namespace: namespace},
+		Data: map[string][]byte{
+			"fsid": []byte("b88c2d78-9de9-4227-9313-a63f62f78743"),
+		},
+	}
+	obj = append(obj, cbp, cfs, rookCephMonSecret, csv)
 
 	client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(obj...).WithStatusSubresource(sc).Build()
 
 	clusters, err := util.GetClusters(context.TODO(), client)
@@ -1219,6 +1259,11 @@ func createFakeScheme(t *testing.T) *runtime.Scheme {
 		assert.Fail(t, "failed to add ocsclientv1a1 scheme")
 	}
 
+	err = operatorsv1alpha1.AddToScheme(scheme)
+	if err != nil {
+		assert.Fail(t, "failed to add operatorsv1alpha1 scheme")
+	}
+
 	return scheme
 }
 
@@ -1277,7 +1322,12 @@ func TestStorageClusterOnMultus(t *testing.T) {
 			},
 		}
 	}
-	reconciler := createFakeInitializationStorageClusterReconciler(t)
+
+	cc := &rookCephv1.CephCluster{}
+	mockCephCluster.DeepCopyInto(cc)
+	cc.ObjectMeta.Name = generateNameForCephCluster(c.cr)
+
+	reconciler := createFakeInitializationStorageClusterReconciler(t, cc)
 	_ = reconciler.Client.Create(context.TODO(), c.cr)
 	result, err := reconciler.Reconcile(context.TODO(), request)
 	if c.testCase != "default" {
diff --git a/go.mod b/go.mod
index c6b1aeb1be..e18afc865d 100644
--- a/go.mod
+++ b/go.mod
@@ -47,6 +47,7 @@ require (
 	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
 	open-cluster-management.io/api v0.12.0
 	sigs.k8s.io/controller-runtime v0.16.3
+	sigs.k8s.io/yaml v1.4.0
 )
 
 require (
@@ -138,7 +139,6 @@ require (
 	sigs.k8s.io/container-object-storage-interface-api v0.1.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
 replace github.com/portworx/sched-ops => github.com/portworx/sched-ops v0.20.4-openstorage-rc3 // required by rook
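
Reviewer note: for illustration, below is a sketch of the ConfigMap this reconciler would produce for a single internal StorageCluster, assuming the StorageCluster is named ocs-storagecluster and both it and the operator live in openshift-storage. All concrete values are illustrative (the FSID reuses the test fixture above, the version comes from whatever odf-operator CSV is installed, and the storage system name from the owner reference). sigs.k8s.io/yaml emits map keys alphabetically; the exact casing of the nested metadata keys depends on types.NamespacedName, which carries no json tags.

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: odf-info
      namespace: openshift-storage        # operator namespace
      ownerReferences:
      - apiVersion: ocs.openshift.io/v1   # set via ctrl.SetControllerReference
        controller: true
        kind: StorageCluster
        name: ocs-storagecluster
    data:
      # one key per StorageCluster: <namespace>_<name>.config.yaml
      openshift-storage_ocs-storagecluster.config.yaml: |
        clients: []                       # populated from onboarding requests (server.go)
        isDROptimized: false
        odfDeploymentType: internal
        odfVersion: 4.15.0                # illustrative; read from the odf-operator CSV
        storageCluster:
          cephClusterFSID: b88c2d78-9de9-4227-9313-a63f62f78743
          metadata:
            name: ocs-storagecluster
            namespace: openshift-storage
          storageProviderEndpoint: ""
        storageClusterCount: 1
        storageSystemName: ocs-storagecluster-storagesystem   # illustrative; from the owner reference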