Skip to content

Commit

Permalink
Merge pull request #384 from red-hat-storage/sync_us--main
Browse files Browse the repository at this point in the history
Syncing latest changes from upstream main for ramen
  • Loading branch information
ShyamsundarR authored Oct 30, 2024
2 parents 1734243 + b83b03a commit 32ecdbc
Show file tree
Hide file tree
Showing 12 changed files with 123 additions and 158 deletions.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ config/hub/bundle
*~
.vscode
.vimrc
.zed

# Generated files
venv
Expand All @@ -47,4 +48,4 @@ ramenctl/build

# Generated disk images
*.qcow2
*.iso
*.iso
21 changes: 7 additions & 14 deletions e2e/deployers/retry.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

const FiveSecondsDuration = 5 * time.Second

func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.SubscriptionPhase) error {
// sleep to wait for subscription is processed
time.Sleep(FiveSecondsDuration)

startTime := time.Now()

for {
Expand All @@ -34,17 +29,15 @@ func waitSubscriptionPhase(namespace, name string, phase subscriptionv1.Subscrip
return nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
return fmt.Errorf(fmt.Sprintf("subscription %s status is not %s yet before timeout", name, phase))
if time.Since(startTime) > util.Timeout {
return fmt.Errorf("subscription %s status is not %s yet before timeout", name, phase)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

func WaitWorkloadHealth(client client.Client, namespace string, w workloads.Workload) error {
time.Sleep(FiveSecondsDuration)

startTime := time.Now()

for {
Expand All @@ -55,13 +48,13 @@ func WaitWorkloadHealth(client client.Client, namespace string, w workloads.Work
return nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
if time.Since(startTime) > util.Timeout {
util.Ctx.Log.Info(err.Error())

return fmt.Errorf(fmt.Sprintf("workload %s is not ready yet before timeout of %v",
w.GetName(), util.Timeout))
return fmt.Errorf("workload %s is not ready yet before timeout of %v",
w.GetName(), util.Timeout)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}
13 changes: 4 additions & 9 deletions e2e/dractions/actions.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,18 +43,13 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error {
placementName := name
drpcName := name

placementDecisionName, err := waitPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementName)
if err != nil {
return err
}

placementDecision, err := getPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementDecisionName)
placementDecision, err := waitPlacementDecision(util.Ctx.Hub.CtrlClient, namespace, placementName)
if err != nil {
return err
}

clusterName := placementDecision.Status.Decisions[0].ClusterName
util.Ctx.Log.Info("got clusterName " + clusterName + " from " + placementDecisionName)
util.Ctx.Log.Info("got clusterName " + clusterName + " from " + placementDecision.Name)

util.Ctx.Log.Info("update placement " + placementName + " annotation")

Expand Down Expand Up @@ -83,8 +78,8 @@ func EnableProtection(w workloads.Workload, d deployers.Deployer) error {
}

// this is the application namespace in drclusters to add the annotation
nsToAnnonate := name
if err := util.CreateNamespaceAndAddAnnotation(nsToAnnonate); err != nil {
nsToAnnotate := name
if err := util.CreateNamespaceAndAddAnnotation(nsToAnnotate); err != nil {
return err
}

Expand Down
12 changes: 0 additions & 12 deletions e2e/dractions/crud.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,18 +33,6 @@ func updatePlacement(client client.Client, placement *clusterv1beta1.Placement)
return client.Update(context.Background(), placement)
}

// getPlacementDecision fetches the PlacementDecision identified by
// namespace/name from the hub cluster. It returns the object on success,
// or a nil pointer together with the lookup error.
func getPlacementDecision(client client.Client, namespace, name string) (*clusterv1beta1.PlacementDecision, error) {
	key := types.NamespacedName{Namespace: namespace, Name: name}
	placementDecision := &clusterv1beta1.PlacementDecision{}

	if err := client.Get(context.Background(), key, placementDecision); err != nil {
		return nil, err
	}

	return placementDecision, nil
}

func getDRPC(client client.Client, namespace, name string) (*ramen.DRPlacementControl, error) {
drpc := &ramen.DRPlacementControl{}
key := types.NamespacedName{Namespace: namespace, Name: name}
Expand Down
72 changes: 25 additions & 47 deletions e2e/dractions/retry.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,41 +15,31 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

// nolint:gocognit
// return placementDecisionName, error
// waitPlacementDecision waits until we have a placement decision and returns the placement decision object.
func waitPlacementDecision(client client.Client, namespace string, placementName string,
) (string, error) {
) (*v1beta1.PlacementDecision, error) {
startTime := time.Now()
placementDecisionName := ""

for {
placement, err := getPlacement(client, namespace, placementName)
if err != nil {
return "", err
return nil, err
}

for _, cond := range placement.Status.Conditions {
if cond.Type == "PlacementSatisfied" && cond.Status == "True" {
placementDecisionName = placement.Status.DecisionGroups[0].Decisions[0]
if placementDecisionName != "" {
return placementDecisionName, nil
}
}
placementDecision, err := getPlacementDecisionFromPlacement(client, placement)
if err != nil {
return nil, err
}

// if placement is controlled by ramen, it will not have placementdecision name in its status
// so need query placementdecision by label
placementDecision, err := getPlacementDecisionFromPlacement(client, placement)
if err == nil && placementDecision != nil {
return placementDecision.Name, nil
if placementDecision != nil && len(placementDecision.Status.Decisions) > 0 {
return placementDecision, nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
return "", fmt.Errorf(
"could not get placement decision for " + placementName + " before timeout, fail")
if time.Since(startTime) > util.Timeout {
return nil, fmt.Errorf("timeout waiting for placement decisions for %q ", placementName)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

Expand All @@ -69,7 +59,7 @@ func waitDRPCReady(client client.Client, namespace string, drpcName string) erro
return nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
if time.Since(startTime) > util.Timeout {
if !conditionReady {
util.Ctx.Log.Info("drpc " + drpcName + " condition 'Available' or 'PeerReady' is not True")
}
Expand All @@ -81,7 +71,7 @@ func waitDRPCReady(client client.Client, namespace string, drpcName string) erro
return fmt.Errorf("drpc " + drpcName + " is not ready yet before timeout, fail")
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

Expand Down Expand Up @@ -126,29 +116,21 @@ func waitDRPCPhase(client client.Client, namespace, name string, phase ramen.DRS
return nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
return fmt.Errorf(fmt.Sprintf(
"drpc %s status is not %s yet before timeout, fail", name, phase))
if time.Since(startTime) > util.Timeout {
return fmt.Errorf("drpc %s status is not %s yet before timeout, fail", name, phase)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

func getCurrentCluster(client client.Client, namespace string, placementName string) (string, error) {
placementDecisionName, err := waitPlacementDecision(client, namespace, placementName)
placementDecision, err := waitPlacementDecision(client, namespace, placementName)
if err != nil {
return "", err
}

placementDecision, err := getPlacementDecision(client, namespace, placementDecisionName)
if err != nil {
return "", err
}

clusterName := placementDecision.Status.Decisions[0].ClusterName

return clusterName, nil
return placementDecision.Status.Decisions[0].ClusterName, nil
}

// return dr cluster client
Expand Down Expand Up @@ -178,8 +160,6 @@ func getTargetCluster(client client.Client, namespace, placementName string, drp

// first wait DRPC to have the expected phase, then check DRPC conditions
func waitDRPC(client client.Client, namespace, name string, expectedPhase ramen.DRState) error {
// sleep to wait for DRPC is processed
time.Sleep(FiveSecondsDuration)
// check Phase
if err := waitDRPCPhase(client, namespace, name, expectedPhase); err != nil {
return err
Expand All @@ -190,8 +170,6 @@ func waitDRPC(client client.Client, namespace, name string, expectedPhase ramen.

func waitDRPCDeleted(client client.Client, namespace string, name string) error {
startTime := time.Now()
// sleep to wait for DRPC is deleted
time.Sleep(FiveSecondsDuration)

for {
_, err := getDRPC(client, namespace, name)
Expand All @@ -205,11 +183,11 @@ func waitDRPCDeleted(client client.Client, namespace string, name string) error
util.Ctx.Log.Info(fmt.Sprintf("error to get drpc %s: %v", name, err))
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
return fmt.Errorf(fmt.Sprintf("drpc %s is not deleted yet before timeout, fail", name))
if time.Since(startTime) > util.Timeout {
return fmt.Errorf("drpc %s is not deleted yet before timeout, fail", name)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

Expand All @@ -230,12 +208,12 @@ func waitDRPCProgression(client client.Client, namespace, name string, progressi
return nil
}

if time.Since(startTime) > time.Second*time.Duration(util.Timeout) {
return fmt.Errorf(fmt.Sprintf("drpc %s progression is not %s yet before timeout of %v",
name, progression, util.Timeout))
if time.Since(startTime) > util.Timeout {
return fmt.Errorf("drpc %s progression is not %s yet before timeout of %v",
name, progression, util.Timeout)
}

time.Sleep(time.Second * time.Duration(util.TimeInterval))
time.Sleep(util.RetryInterval)
}
}

Expand Down
2 changes: 1 addition & 1 deletion e2e/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ func TestMain(m *testing.M) {
}

log.Info("Global setting", "Timeout", util.Timeout)
log.Info("Global setting", "Retry Interval", util.TimeInterval)
log.Info("Global setting", "Retry Interval", util.RetryInterval)

os.Exit(m.Run())
}
Expand Down
6 changes: 4 additions & 2 deletions e2e/util/const.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,13 @@

package util

import "time"

const (
RamenSystemNamespace = "ramen-system"

Timeout = 600 // seconds
TimeInterval = 30 // seconds
Timeout = 600 * time.Second
RetryInterval = 5 * time.Second

defaultChannelName = "ramen-gitops"
defaultChannelNamespace = "ramen-samples"
Expand Down
27 changes: 26 additions & 1 deletion e2e/util/crud.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ package util

import (
"context"
"fmt"
"time"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
Expand Down Expand Up @@ -49,9 +51,32 @@ func DeleteNamespace(client client.Client, namespace string) error {
}

Ctx.Log.Info("namespace " + namespace + " not found")

return nil
}

return nil
Ctx.Log.Info("waiting until namespace " + namespace + " is deleted")

startTime := time.Now()
key := types.NamespacedName{Name: namespace}

for {
if err := client.Get(context.Background(), key, ns); err != nil {
if !errors.IsNotFound(err) {
return err
}

Ctx.Log.Info("namespace " + namespace + " deleted")

return nil
}

if time.Since(startTime) > 60*time.Second {
return fmt.Errorf("timeout deleting namespace %q", namespace)
}

time.Sleep(time.Second)
}
}

func createChannel() error {
Expand Down
8 changes: 8 additions & 0 deletions internal/controller/vrg_status_pvcs.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,14 @@ func FindProtectedPVC(vrg *ramen.VolumeReplicationGroup, pvcNamespaceName, pvcNa
return protectedPvc
}

// addProtectedPVC appends a new ProtectedPVC entry for the given PVC
// namespace/name to the VRG status and returns a pointer to the appended
// slice element so the caller can populate it in place.
// NOTE(review): the returned pointer is only stable until the
// ProtectedPVCs slice is next grown/reallocated.
func (v *VRGInstance) addProtectedPVC(pvcNamespace, pvcName string) *ramen.ProtectedPVC {
	pvcs := append(v.instance.Status.ProtectedPVCs,
		ramen.ProtectedPVC{Namespace: pvcNamespace, Name: pvcName})
	v.instance.Status.ProtectedPVCs = pvcs

	return &pvcs[len(pvcs)-1]
}

func (v *VRGInstance) pvcStatusDeleteIfPresent(pvcNamespaceName, pvcName string, log logr.Logger) {
pvcStatus, i := FindProtectedPvcAndIndex(v.instance, pvcNamespaceName, pvcName)
if pvcStatus == nil {
Expand Down
Loading

0 comments on commit 32ecdbc

Please sign in to comment.