Skip to content

Commit

Permalink
ODF info CM: reconciler
Browse files — browse the repository at this point in the history
Signed-off-by: raaizik <[email protected]>
  • Loading branch information
raaizik committed Mar 27, 2024
1 parent 493179c commit 2019450
Show file tree
Hide file tree
Showing 7 changed files with 338 additions and 170 deletions.
180 changes: 21 additions & 159 deletions controllers/storagecluster/clusterclaims.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,20 +3,13 @@ package storagecluster
import (
"context"
"fmt"
"strconv"
"strings"

"github.com/go-logr/logr"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
ocsv1 "github.com/red-hat-storage/ocs-operator/api/v4/v1"
rookCephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
corev1 "k8s.io/api/core/v1"
extensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/tools/clientcmd"
clusterclientv1alpha1 "open-cluster-management.io/api/client/cluster/clientset/versioned"
clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
Expand All @@ -25,31 +18,22 @@ import (
)

const (
RookCephMonSecretName = "rook-ceph-mon"
FsidKey = "fsid"
OdfOperatorNamePrefix = "odf-operator"
ClusterClaimCRDName = "clusterclaims.cluster.open-cluster-management.io"
ClusterClaimCRDName = "clusterclaims.cluster.open-cluster-management.io"
)

var (
ClusterClaimGroup = "odf"
OdfVersion = fmt.Sprintf("version.%s.openshift.io", ClusterClaimGroup)
StorageSystemName = fmt.Sprintf("storagesystemname.%s.openshift.io", ClusterClaimGroup)
StorageClusterName = fmt.Sprintf("storageclustername.%s.openshift.io", ClusterClaimGroup)
StorageClusterCount = fmt.Sprintf("count.storageclusters.%s.openshift.io", ClusterClaimGroup)
StorageClusterDROptimized = fmt.Sprintf("droptimized.%s.openshift.io", ClusterClaimGroup)
CephFsid = fmt.Sprintf("cephfsid.%s.openshift.io", ClusterClaimGroup)
ClusterClaimGroup = "odf"
OdfInfoConfig = fmt.Sprintf("odfinfoconfig.%s.openshift.io", ClusterClaimGroup)
)

type ocsClusterClaim struct{}

type ClusterClaimCreator struct {
Context context.Context
Logger logr.Logger
Client client.Client
Values map[string]string
StorageCluster *ocsv1.StorageCluster
StorageClusterCount int
Context context.Context
Logger logr.Logger
Client client.Client
Values map[string]string
StorageCluster *ocsv1.StorageCluster
}

func doesClusterClaimCrdExist(ctx context.Context, client client.Client) (bool, error) {
Expand Down Expand Up @@ -83,43 +67,9 @@ func (obj *ocsClusterClaim) ensureCreated(r *StorageClusterReconciler, instance
StorageCluster: instance,
}

odfVersion, err := creator.getOdfVersion()
if err != nil {
r.Log.Error(err, "failed to get odf version for operator. retrying again")
return reconcile.Result{}, err
}

storageClusterCount := len(r.clusters.GetStorageClusters())

cephFsid, err := creator.getCephFsid()
if err != nil {
r.Log.Error(err, "failed to get ceph fsid from secret. retrying again")
return reconcile.Result{}, err
}

storageSystemName, err := creator.getStorageSystemName()
if err != nil {
r.Log.Error(err, "failed to get storagesystem name. retrying again")
return reconcile.Result{}, err
}

var isDROptimized = "false"
// Set isDROptmized to "false" in case of external clusters as we currently don't have to way to determine
// if external cluster OSDs are using bluestore-rdr
if !instance.Spec.ExternalStorage.Enable {
isDROptimized, err = creator.getIsDROptimized(r.serverVersion)
if err != nil {
r.Log.Error(err, "failed to get cephcluster status. retrying again")
return reconcile.Result{}, err
}
}
odfConfigKeyName := instance.Namespace + "/" + instance.Name + "." + OdfInfoKeyName

err = creator.setStorageClusterCount(strconv.Itoa(storageClusterCount)).
setStorageSystemName(storageSystemName).
setStorageClusterName(instance.Name).
setOdfVersion(odfVersion).
setCephFsid(cephFsid).
setDROptimized(isDROptimized).
err := creator.setOdfInfoConfigKey(odfConfigKeyName).
create()

return reconcile.Result{}, err
Expand Down Expand Up @@ -179,95 +129,12 @@ func (c *ClusterClaimCreator) create() error {

return nil
}
// getOdfVersion looks up the ClusterServiceVersion whose name starts with
// OdfOperatorNamePrefix in the StorageCluster's namespace and returns its
// version string. It returns an error when listing fails or when no
// matching CSV is found.
func (c *ClusterClaimCreator) getOdfVersion() (string, error) {
	var csvList operatorsv1alpha1.ClusterServiceVersionList
	if err := c.Client.List(c.Context, &csvList, &client.ListOptions{Namespace: c.StorageCluster.Namespace}); err != nil {
		return "", err
	}

	for i := range csvList.Items {
		if strings.HasPrefix(csvList.Items[i].Name, OdfOperatorNamePrefix) {
			return csvList.Items[i].Spec.Version.String(), nil
		}
	}

	return "", fmt.Errorf("failed to find csv with prefix %q", OdfOperatorNamePrefix)
}

// getCephFsid reads the ceph cluster fsid from the rook-ceph-mon secret in
// the StorageCluster's namespace. It returns an error when the secret
// cannot be fetched or does not contain the fsid key.
func (c *ClusterClaimCreator) getCephFsid() (string, error) {
	var monSecret corev1.Secret
	key := types.NamespacedName{Name: RookCephMonSecretName, Namespace: c.StorageCluster.Namespace}
	if err := c.Client.Get(c.Context, key, &monSecret); err != nil {
		return "", err
	}

	fsid, ok := monSecret.Data[FsidKey]
	if !ok {
		return "", fmt.Errorf("failed to fetch ceph fsid from %q secret", RookCephMonSecretName)
	}
	return string(fsid), nil
}

// getIsDROptimized reports (as the strings "true"/"false") whether every OSD
// of the CephCluster backing this StorageCluster uses the bluestore-rdr
// store type. It returns an error when the CephCluster cannot be fetched or
// its status carries no OSD store information.
func (c *ClusterClaimCreator) getIsDROptimized(serverVersion *version.Info) (string, error) {
	var cephCluster rookCephv1.CephCluster
	key := types.NamespacedName{
		Name:      generateNameForCephClusterFromString(c.StorageCluster.Name),
		Namespace: c.StorageCluster.Namespace,
	}
	if err := c.Client.Get(c.Context, key, &cephCluster); err != nil {
		return "false", err
	}
	if cephCluster.Status.CephStorage == nil || cephCluster.Status.CephStorage.OSD.StoreType == nil {
		return "false", fmt.Errorf("cephcluster status does not have OSD store information")
	}
	// Optimized only when every OSD (count per getOsdCount) is bluestore-rdr.
	rdrCount, found := cephCluster.Status.CephStorage.OSD.StoreType["bluestore-rdr"]
	if !found || rdrCount < getOsdCount(c.StorageCluster, serverVersion) {
		return "false", nil
	}
	return "true", nil
}

// setStorageClusterCount stores the StorageCluster count (already rendered
// as a string by the caller) under the StorageClusterCount claim key and
// returns c for call chaining.
func (c *ClusterClaimCreator) setStorageClusterCount(n string) *ClusterClaimCreator {
	c.Values[StorageClusterCount] = n
	return c
}

// setStorageSystemName stores "<name>/<namespace>" under the
// StorageSystemName claim key and returns c for call chaining.
func (c *ClusterClaimCreator) setStorageSystemName(name string) *ClusterClaimCreator {
	c.Values[StorageSystemName] = name + "/" + c.StorageCluster.GetNamespace()
	return c
}

// setOdfVersion stores the odf operator version under the OdfVersion claim
// key and returns c for call chaining.
//
// The parameter is named ver so it does not shadow the imported
// k8s.io/apimachinery/pkg/version package used elsewhere in this file.
func (c *ClusterClaimCreator) setOdfVersion(ver string) *ClusterClaimCreator {
	c.Values[OdfVersion] = ver
	return c
}

// setStorageClusterName stores "<name>/<namespace>" under the
// StorageClusterName claim key and returns c for call chaining.
func (c *ClusterClaimCreator) setStorageClusterName(name string) *ClusterClaimCreator {
	c.Values[StorageClusterName] = name + "/" + c.StorageCluster.GetNamespace()
	return c
}

// setCephFsid stores the ceph cluster fsid under the CephFsid claim key and
// returns c for call chaining.
func (c *ClusterClaimCreator) setCephFsid(id string) *ClusterClaimCreator {
	c.Values[CephFsid] = id
	return c
}

func (c *ClusterClaimCreator) setDROptimized(optimized string) *ClusterClaimCreator {
c.Values[StorageClusterDROptimized] = optimized
func (c *ClusterClaimCreator) setOdfInfoConfigKey(name string) *ClusterClaimCreator {
c.Values[OdfInfoConfig] = name
return c
}

// getStorageSystemName returns the name of the owning StorageSystem, found
// by scanning the StorageCluster's owner references for one of kind
// "StorageSystem". It returns an error when no such owner reference exists.
func (c *ClusterClaimCreator) getStorageSystemName() (string, error) {
	for i := range c.StorageCluster.OwnerReferences {
		owner := &c.StorageCluster.OwnerReferences[i]
		if owner.Kind == "StorageSystem" {
			return owner.Name, nil
		}
	}
	return "", fmt.Errorf("failed to find parent StorageSystem's name in StorageCluster %q ownerreferences", c.StorageCluster.Name)
}

func (obj *ocsClusterClaim) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.StorageCluster) (reconcile.Result, error) {
r.Log.Info("deleting ClusterClaim resources")
ctx := context.TODO()
Expand All @@ -277,20 +144,15 @@ func (obj *ocsClusterClaim) ensureDeleted(r *StorageClusterReconciler, _ *ocsv1.
}
return reconcile.Result{}, nil
}
names := []string{OdfVersion, StorageSystemName, StorageClusterName, CephFsid}
for _, name := range names {
cc := clusterv1alpha1.ClusterClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
err := r.Client.Delete(context.TODO(), &cc)
if errors.IsNotFound(err) {
continue
} else if err != nil {
r.Log.Error(err, "failed to delete ClusterClaim", "ClusterClaim", cc.Name)
return reconcile.Result{}, fmt.Errorf("failed to delete %v: %v", cc.Name, err)
}
cc := clusterv1alpha1.ClusterClaim{
ObjectMeta: metav1.ObjectMeta{
Name: OdfInfoConfig,
},
}
err := r.Client.Delete(context.TODO(), &cc)
if err != nil {
r.Log.Error(err, "failed to delete ClusterClaim", "ClusterClaim", cc.Name)
return reconcile.Result{}, fmt.Errorf("failed to delete %v: %v", cc.Name, err)
}

return reconcile.Result{}, nil
Expand Down
24 changes: 23 additions & 1 deletion controllers/storagecluster/initialization_reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,11 @@ package storagecluster

import (
"context"
"fmt"
"github.com/blang/semver/v4"
version2 "github.com/operator-framework/api/pkg/lib/version"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
version3 "github.com/red-hat-storage/ocs-operator/v4/version"
"os"
"testing"

Expand Down Expand Up @@ -350,6 +355,23 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
Phase: cephv1.ConditionType(util.PhaseReady),
},
}
verOdf, _ := semver.Make(getSemVer(version3.Version, 1, true))
csv := &operatorsv1alpha1.ClusterServiceVersion{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("odf-operator-%s", sc.Name),
Namespace: sc.Namespace,
},
Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
Version: version2.OperatorVersion{Version: verOdf},
},
}

rookCephMonSecret := &v1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "rook-ceph-mon", Namespace: sc.Namespace},
Data: map[string][]byte{
"fsid": []byte("b88c2d78-9de9-4227-9313-a63f62f78743"),
},
}

statusSubresourceObjs := []client.Object{sc}
var runtimeObjects []runtime.Object
Expand All @@ -364,7 +386,7 @@ func createFakeInitializationStorageClusterReconciler(t *testing.T, obj ...runti
}
}

runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig)
runtimeObjects = append(runtimeObjects, mockNodeList.DeepCopy(), cbp, cfs, cnfs, cnfsbp, cnfssvc, infrastructure, networkConfig, rookCephMonSecret, csv)
client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjects...).WithStatusSubresource(statusSubresourceObjs...).Build()

return StorageClusterReconciler{
Expand Down
Loading

0 comments on commit 2019450

Please sign in to comment.