diff --git a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml index e005fa1a7..71bc7af8e 100644 --- a/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/latest/manifests/cluster-manager.clusterserviceversion.yaml @@ -59,9 +59,9 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-05-22T01:34:44Z" + createdAt: "2024-06-06T01:37:53Z" description: Manages the installation and upgrade of the ClusterManager. - operators.operatorframework.io/builder: operator-sdk-v1.32.0 + operators.operatorframework.io/builder: operator-sdk-v1.28.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/open-cluster-management-io/ocm support: Open Cluster Management Community diff --git a/deploy/cluster-manager/olm-catalog/latest/metadata/annotations.yaml b/deploy/cluster-manager/olm-catalog/latest/metadata/annotations.yaml index 90ee68c20..b044eda8a 100644 --- a/deploy/cluster-manager/olm-catalog/latest/metadata/annotations.yaml +++ b/deploy/cluster-manager/olm-catalog/latest/metadata/annotations.yaml @@ -6,6 +6,6 @@ annotations: operators.operatorframework.io.bundle.package.v1: cluster-manager operators.operatorframework.io.bundle.channels.v1: stable operators.operatorframework.io.bundle.channel.default.v1: stable - operators.operatorframework.io.metrics.builder: operator-sdk-v1.32.0 + operators.operatorframework.io.metrics.builder: operator-sdk-v1.28.0 operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 diff --git a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml index 0c0bb49ac..e16592d3f 100644 --- a/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/latest/manifests/klusterlet.clusterserviceversion.yaml @@ -31,9 +31,9 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-05-22T01:34:44Z" + createdAt: "2024-06-06T01:37:53Z" description: Manages the installation and upgrade of the Klusterlet. 
- operators.operatorframework.io/builder: operator-sdk-v1.32.0 + operators.operatorframework.io/builder: operator-sdk-v1.28.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/open-cluster-management-io/ocm support: Open Cluster Management Community diff --git a/deploy/klusterlet/olm-catalog/latest/metadata/annotations.yaml b/deploy/klusterlet/olm-catalog/latest/metadata/annotations.yaml index 052fcfc1d..3fcb580f5 100644 --- a/deploy/klusterlet/olm-catalog/latest/metadata/annotations.yaml +++ b/deploy/klusterlet/olm-catalog/latest/metadata/annotations.yaml @@ -6,6 +6,6 @@ annotations: operators.operatorframework.io.bundle.package.v1: klusterlet operators.operatorframework.io.bundle.channels.v1: stable operators.operatorframework.io.bundle.channel.default.v1: stable - operators.operatorframework.io.metrics.builder: operator-sdk-v1.32.0 + operators.operatorframework.io.metrics.builder: operator-sdk-v1.28.0 operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 diff --git a/go.mod b/go.mod index fb17897c3..ea57bbd69 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( k8s.io/kube-aggregator v0.29.3 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556 - open-cluster-management.io/api v0.13.1-0.20240521030453-9d94703b9eba + open-cluster-management.io/api v0.13.1-0.20240605083248-f9e7f50520fc open-cluster-management.io/sdk-go v0.13.1-0.20240520073308-f18d198a844d sigs.k8s.io/controller-runtime v0.17.3 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 diff --git a/go.sum b/go.sum index e26752d4a..745f0a54f 100644 --- a/go.sum +++ b/go.sum @@ -467,8 +467,8 @@ k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0g k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556 h1:X3vJEx9agC94l7SitpWZFDshISdL1niqVH0+diyqfJo= open-cluster-management.io/addon-framework v0.9.1-0.20240419070222-e703fc5a2556/go.mod h1:HayKCznnlyW+0dUJQGj5sNR6i3tvylSySD3YnvZkBtY= -open-cluster-management.io/api v0.13.1-0.20240521030453-9d94703b9eba h1:UsXnD4/N7pxYupPgoLvTq8wO73V72vD2D2ZkDd4iws0= -open-cluster-management.io/api v0.13.1-0.20240521030453-9d94703b9eba/go.mod h1:yrNuMMpciXjXPnj2yznb6LTyrGliiTrFZAJDp/Ck3c4= +open-cluster-management.io/api v0.13.1-0.20240605083248-f9e7f50520fc h1:tcfncubZRFphYtDXBE7ApBNlSnj1RNazhW+8F01XYYg= +open-cluster-management.io/api v0.13.1-0.20240605083248-f9e7f50520fc/go.mod h1:ltijKJhDifrPH0csvCUmFt5lzaERv+BBfh6X3l83rT0= open-cluster-management.io/sdk-go v0.13.1-0.20240520073308-f18d198a844d h1:5lcrL1DsQdNtDQU6U2oXwLAN0EBczcvI421YNgEzL/4= open-cluster-management.io/sdk-go v0.13.1-0.20240520073308-f18d198a844d/go.mod h1:XBrldz+AqVBy9miOVNIu+6l8JXS18i795XbTqIqURJU= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= diff --git a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index f0d86d922..2e4f54966 100644 --- a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ 
b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -284,11 +284,14 @@ spec: server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False - with the reason of ApplyConflict. + with the reason of ApplyConflict. ReadOnly type means + the agent will only check the existence of the resource + based on its metadata. enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type diff --git a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml index c3b0133ec..f9152ba4d 100644 --- a/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml +++ b/manifests/cluster-manager/hub/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml @@ -253,11 +253,14 @@ spec: means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the - status of False with the reason of ApplyConflict. + status of False with the reason of ApplyConflict. ReadOnly + type means the agent will only check the existence of + the resource based on its metadata. enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type diff --git a/manifests/cluster-manager/hub/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml b/manifests/cluster-manager/hub/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml index 6a063e773..898dadbab 100644 --- a/manifests/cluster-manager/hub/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml +++ b/manifests/cluster-manager/hub/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml @@ -271,11 +271,14 @@ spec: server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False - with the reason of ApplyConflict. + with the reason of ApplyConflict. ReadOnly type means + the agent will only check the existence of the resource + based on its metadata. 
enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type diff --git a/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler.go b/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler.go index 2f68d71ad..2e96475da 100644 --- a/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler.go +++ b/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -31,17 +30,7 @@ func (d *cmaProgressingReconciler) reconcile( continue } - isUpgrade := false - - for _, configReference := range installProgression.ConfigReferences { - if configReference.LastAppliedConfig != nil { - isUpgrade = true - break - } - } - setAddOnInstallProgressionsAndLastApplied(&cmaCopy.Status.InstallProgressions[i], - isUpgrade, placementNode.countAddonUpgrading(), placementNode.countAddonUpgradeSucceed(), placementNode.countAddonUpgradeFailed(), @@ -59,45 +48,23 @@ func (d *cmaProgressingReconciler) reconcile( func setAddOnInstallProgressionsAndLastApplied( installProgression *addonv1alpha1.InstallProgression, - isUpgrade bool, progressing, done, failed, timeout, total int) { - // always update progressing condition when there is no config - // skip update progressing condition when last applied config already the same as desired - skip := len(installProgression.ConfigReferences) > 0 - for _, configReference := range installProgression.ConfigReferences { - if !equality.Semantic.DeepEqual(configReference.LastAppliedConfig, configReference.DesiredConfig) && - !equality.Semantic.DeepEqual(configReference.LastKnownGoodConfig, configReference.DesiredConfig) { - skip = false - } - } - if skip { - return - } + condition := metav1.Condition{ Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, } if (total == 0 && done == 0) || (done != total) { condition.Status = metav1.ConditionTrue - if isUpgrade { - condition.Reason = addonv1alpha1.ProgressingReasonUpgrading - condition.Message = fmt.Sprintf("%d/%d upgrading..., %d failed %d timeout.", progressing+done, total, failed, timeout) - } else { - condition.Reason = addonv1alpha1.ProgressingReasonInstalling - condition.Message = fmt.Sprintf("%d/%d installing..., %d failed %d timeout.", progressing+done, total, failed, timeout) - } + condition.Reason = addonv1alpha1.ProgressingReasonProgressing + condition.Message = fmt.Sprintf("%d/%d progressing..., %d failed %d timeout.", progressing+done, total, failed, timeout) } else { for i, configRef := range installProgression.ConfigReferences { installProgression.ConfigReferences[i].LastAppliedConfig = configRef.DesiredConfig.DeepCopy() installProgression.ConfigReferences[i].LastKnownGoodConfig = configRef.DesiredConfig.DeepCopy() } condition.Status = metav1.ConditionFalse - if isUpgrade { - condition.Reason = addonv1alpha1.ProgressingReasonUpgradeSucceed - condition.Message = fmt.Sprintf("%d/%d upgrade completed with no errors, %d failed %d timeout.", done, total, failed, timeout) - } else { - condition.Reason = addonv1alpha1.ProgressingReasonInstallSucceed - condition.Message = fmt.Sprintf("%d/%d install completed with no errors, %d failed %d timeout.", done, total, failed, timeout) - } + condition.Reason = addonv1alpha1.ProgressingReasonCompleted + condition.Message = fmt.Sprintf("%d/%d completed with no errors, %d failed %d timeout.", done, 
total, failed, timeout) } meta.SetStatusCondition(&installProgression.Conditions, condition) } diff --git a/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler_test.go b/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler_test.go index f2bd8f5a7..46586749a 100644 --- a/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler_test.go +++ b/pkg/addon/controllers/addonconfiguration/cma_progressing_reconciler_test.go @@ -92,10 +92,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonProgressing { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/2 installing..., 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/2 progressing..., 0 failed 0 timeout." { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) } }, @@ -185,10 +185,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonProgressing { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Reason) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..., 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 progressing..., 0 failed 0 timeout." { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) } }, @@ -268,10 +268,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstallSucceed { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonCompleted { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 install completed with no errors, 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 completed with no errors, 0 failed 0 timeout." 
{ t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } }, @@ -344,10 +344,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonProgressing { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 upgrading..., 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 progressing..., 0 failed 0 timeout." { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } }, @@ -431,10 +431,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { cma.Status.InstallProgressions[0].ConfigReferences[0].DesiredConfig) { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgradeSucceed { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonCompleted { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 upgrade completed with no errors, 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/1 completed with no errors, 0 failed 0 timeout." { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } }, @@ -517,10 +517,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonUpgrading { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonProgressing { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/1 upgrading..., 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "0/1 progressing..., 0 failed 0 timeout." 
{ t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } }, @@ -592,10 +592,10 @@ func TestMgmtAddonProgressingReconcile(t *testing.T) { if cma.Status.InstallProgressions[0].ConfigReferences[0].LastKnownGoodConfig != nil { t.Errorf("InstallProgressions LastKnownGoodConfig is not correct: %v", cma.Status.InstallProgressions[0].ConfigReferences[0]) } - if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonInstalling { + if cma.Status.InstallProgressions[0].Conditions[0].Reason != addonv1alpha1.ProgressingReasonProgressing { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions) } - if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 installing..., 0 failed 0 timeout." { + if cma.Status.InstallProgressions[0].Conditions[0].Message != "1/2 progressing..., 0 failed 0 timeout." { t.Errorf("InstallProgressions condition is not correct: %v", cma.Status.InstallProgressions[0].Conditions[0].Message) } }, diff --git a/pkg/addon/controllers/addonconfiguration/graph.go b/pkg/addon/controllers/addonconfiguration/graph.go index 96a21dfee..32700f244 100644 --- a/pkg/addon/controllers/addonconfiguration/graph.go +++ b/pkg/addon/controllers/addonconfiguration/graph.go @@ -77,10 +77,10 @@ func (n *addonNode) setRolloutStatus() { // desired config spec hash matches actual, but last applied config spec hash doesn't match actual } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { switch progressingCond.Reason { - case addonv1alpha1.ProgressingReasonInstallFailed, addonv1alpha1.ProgressingReasonUpgradeFailed: + case addonv1alpha1.ProgressingReasonFailed: n.status.Status = clustersdkv1alpha1.Failed n.status.LastTransitionTime = &progressingCond.LastTransitionTime - case addonv1alpha1.ProgressingReasonInstalling, addonv1alpha1.ProgressingReasonUpgrading: + case addonv1alpha1.ProgressingReasonProgressing: n.status.Status = clustersdkv1alpha1.Progressing n.status.LastTransitionTime = &progressingCond.LastTransitionTime default: @@ -96,7 +96,7 @@ func (n *addonNode) setRolloutStatus() { // succeed n.status.Status = clustersdkv1alpha1.Succeeded - if progressingCond.Reason == addonv1alpha1.ProgressingReasonInstallSucceed || progressingCond.Reason == addonv1alpha1.ProgressingReasonUpgradeSucceed { + if progressingCond.Reason == addonv1alpha1.ProgressingReasonCompleted { n.status.LastTransitionTime = &progressingCond.LastTransitionTime } } diff --git a/pkg/addon/controllers/addonconfiguration/graph_test.go b/pkg/addon/controllers/addonconfiguration/graph_test.go index 403c49079..8313925b5 100644 --- a/pkg/addon/controllers/addonconfiguration/graph_test.go +++ b/pkg/addon/controllers/addonconfiguration/graph_test.go @@ -228,7 +228,7 @@ func TestConfigurationGraph(t *testing.T) { }, []metav1.Condition{ { Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, - Reason: addonv1alpha1.ProgressingReasonUpgradeFailed, + Reason: addonv1alpha1.ProgressingReasonFailed, LastTransitionTime: fakeTime, }, }), @@ -245,7 +245,7 @@ func TestConfigurationGraph(t *testing.T) { }, []metav1.Condition{ { Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, - Reason: addonv1alpha1.ProgressingReasonUpgrading, + Reason: addonv1alpha1.ProgressingReasonProgressing, LastTransitionTime: fakeTime, }, }), @@ -266,7 +266,7 @@ func TestConfigurationGraph(t *testing.T) { }, []metav1.Condition{ { Type: 
addonv1alpha1.ManagedClusterAddOnConditionProgressing, - Reason: addonv1alpha1.ProgressingReasonUpgradeSucceed, + Reason: addonv1alpha1.ProgressingReasonCompleted, LastTransitionTime: fakeTime, }, }), @@ -287,7 +287,7 @@ func TestConfigurationGraph(t *testing.T) { }, []metav1.Condition{ { Type: addonv1alpha1.ManagedClusterAddOnConditionProgressing, - Reason: addonv1alpha1.ProgressingReasonUpgradeSucceed, + Reason: addonv1alpha1.ProgressingReasonCompleted, LastTransitionTime: fakeTime, }, }), diff --git a/pkg/addon/controllers/addonprogressing/controller.go b/pkg/addon/controllers/addonprogressing/controller.go index f5b83a065..48602ffe6 100644 --- a/pkg/addon/controllers/addonprogressing/controller.go +++ b/pkg/addon/controllers/addonprogressing/controller.go @@ -32,12 +32,6 @@ import ( "open-cluster-management.io/ocm/pkg/common/queue" ) -const ( - ProgressingDoing string = "Doing" - ProgressingSucceed string = "Succeed" - ProgressingFailed string = "Failed" -) - // addonProgressingController reconciles instances of managedclusteraddon on the hub // based to update the status progressing condition and last applied config type addonProgressingController struct { @@ -172,21 +166,12 @@ func (c *addonProgressingController) updateAddonProgressingAndLastApplied( } } - // set upgrade flag - isUpgrade := false - for _, configReference := range newaddon.Status.ConfigReferences { - if configReference.LastAppliedConfig != nil && configReference.LastAppliedConfig.SpecHash != "" { - isUpgrade = true - break - } - } - // get addon works requirement, _ := labels.NewRequirement(addonapiv1alpha1.AddonLabelKey, selection.Equals, []string{newaddon.Name}) selector := labels.NewSelector().Add(*requirement) addonWorks, err := c.workLister.ManifestWorks(newaddon.Namespace).List(selector) if err != nil { - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingFailed, err.Error(), newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonFailed, err.Error(), newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } @@ -194,14 +179,14 @@ func (c *addonProgressingController) updateAddonProgressingAndLastApplied( // get hosted addon works hostedAddonWorks, err := c.workLister.ManifestWorks(hostingClusterName).List(selector) if err != nil { - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingFailed, err.Error(), newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonFailed, err.Error(), newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } addonWorks = append(addonWorks, hostedAddonWorks...) 
} if len(addonWorks) == 0 { - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "no addon works", newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonProgressing, "no addon works", newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } @@ -214,19 +199,19 @@ func (c *addonProgressingController) updateAddonProgressingAndLastApplied( // check if work configs matches addon configs if !workConfigsMatchesAddon(klog.FromContext(ctx), work, newaddon) { - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "configs mismatch", newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonProgressing, "mca and work configs mismatch", newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } // check if work is ready if !workIsReady(work) { - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingDoing, "work is not ready", newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonProgressing, "work is not ready", newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } } // set lastAppliedConfig when all the work matches addon and are ready. - setAddOnProgressingAndLastApplied(isUpgrade, ProgressingSucceed, "", newaddon) + setAddOnProgressingAndLastApplied(addonapiv1alpha1.ProgressingReasonCompleted, "", newaddon) return patcher.PatchStatus(ctx, newaddon, newaddon.Status, oldaddon.Status) } @@ -295,7 +280,7 @@ func workIsReady(work *workapiv1.ManifestWork) bool { } // set addon progressing condition and last applied -func setAddOnProgressingAndLastApplied(isUpgrade bool, status string, message string, addon *addonapiv1alpha1.ManagedClusterAddOn) { +func setAddOnProgressingAndLastApplied(reason string, message string, addon *addonapiv1alpha1.ManagedClusterAddOn) { // always update progressing condition when there is no config // skip update progressing condition when last applied config already the same as desired skip := len(addon.Status.ConfigReferences) > 0 @@ -309,39 +294,22 @@ func setAddOnProgressingAndLastApplied(isUpgrade bool, status string, message st } condition := metav1.Condition{ - Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, + Reason: reason, } - switch status { - case ProgressingDoing: + switch reason { + case addonapiv1alpha1.ProgressingReasonProgressing: condition.Status = metav1.ConditionTrue - if isUpgrade { - condition.Reason = addonapiv1alpha1.ProgressingReasonUpgrading - condition.Message = fmt.Sprintf("upgrading... %v", message) - } else { - condition.Reason = addonapiv1alpha1.ProgressingReasonInstalling - condition.Message = fmt.Sprintf("installing... %v", message) - } - case ProgressingSucceed: + condition.Message = fmt.Sprintf("progressing... %v", message) + case addonapiv1alpha1.ProgressingReasonCompleted: condition.Status = metav1.ConditionFalse for i, configReference := range addon.Status.ConfigReferences { addon.Status.ConfigReferences[i].LastAppliedConfig = configReference.DesiredConfig.DeepCopy() } - if isUpgrade { - condition.Reason = addonapiv1alpha1.ProgressingReasonUpgradeSucceed - condition.Message = "upgrade completed with no errors." - } else { - condition.Reason = addonapiv1alpha1.ProgressingReasonInstallSucceed - condition.Message = "install completed with no errors." - } - case ProgressingFailed: + condition.Message = "completed with no errors." 
+ case addonapiv1alpha1.ProgressingReasonFailed: condition.Status = metav1.ConditionFalse - if isUpgrade { - condition.Reason = addonapiv1alpha1.ProgressingReasonUpgradeFailed - condition.Message = message - } else { - condition.Reason = addonapiv1alpha1.ProgressingReasonInstallFailed - condition.Message = message - } + condition.Message = message } meta.SetStatusCondition(&addon.Status.Conditions, condition) } diff --git a/pkg/addon/controllers/addonprogressing/controller_test.go b/pkg/addon/controllers/addonprogressing/controller_test.go index bb8ee4793..d7e8106cb 100644 --- a/pkg/addon/controllers/addonprogressing/controller_test.go +++ b/pkg/addon/controllers/addonprogressing/controller_test.go @@ -101,7 +101,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } }, @@ -168,7 +168,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 0 { @@ -238,7 +238,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 0 { @@ -308,7 +308,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 0 { @@ -378,7 +378,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 0 { @@ -448,7 +448,7 @@ func 
TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstallSucceed && configCond.Status == metav1.ConditionFalse) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonCompleted && configCond.Status == metav1.ConditionFalse) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 1 { @@ -521,7 +521,7 @@ func TestReconcile(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgradeSucceed && configCond.Status == metav1.ConditionFalse) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonCompleted && configCond.Status == metav1.ConditionFalse) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 1 { @@ -736,7 +736,7 @@ func TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && + configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } @@ -818,7 +818,7 @@ func TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && + configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } @@ -899,7 +899,7 @@ func TestReconcileHostedAddons(t *testing.T) { t.Fatal(err) } configCond := meta.FindStatusCondition(addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) - if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonInstalling && configCond.Status == metav1.ConditionTrue) { + if !(configCond != nil && configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } if len(addOn.Status.ConfigReferences) != 0 { @@ -981,7 +981,7 @@ func TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && + configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } @@ -1064,7 +1064,7 @@ func TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgrading && + configCond.Reason == addonapiv1alpha1.ProgressingReasonProgressing && configCond.Status == metav1.ConditionTrue) { t.Errorf("Condition Progressing is incorrect") } @@ -1147,7 +1147,7 @@ func 
TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonInstallSucceed && + configCond.Reason == addonapiv1alpha1.ProgressingReasonCompleted && configCond.Status == metav1.ConditionFalse) { t.Errorf("Condition Progressing is incorrect") } @@ -1235,7 +1235,7 @@ func TestReconcileHostedAddons(t *testing.T) { configCond := meta.FindStatusCondition( addOn.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnConditionProgressing) if !(configCond != nil && - configCond.Reason == addonapiv1alpha1.ProgressingReasonUpgradeSucceed && + configCond.Reason == addonapiv1alpha1.ProgressingReasonCompleted && configCond.Status == metav1.ConditionFalse) { t.Errorf("Condition Progressing is incorrect") } diff --git a/test/integration/addon/addon_manager_upgrade_test.go b/test/integration/addon/addon_manager_upgrade_test.go index 1e4b0fac3..c04e2014c 100644 --- a/test/integration/addon/addon_manager_upgrade_test.go +++ b/test/integration/addon/addon_manager_upgrade_test.go @@ -231,8 +231,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonInstallSucceed, - Message: "install completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -272,8 +272,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonInstallSucceed, - Message: "4/4 install completed with no errors, 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "4/4 completed with no errors, 0 failed 0 timeout.", }) ginkgo.By("update all") @@ -310,8 +310,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -351,8 +351,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "4/4 upgrade completed with no errors, 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "4/4 completed with no errors, 0 failed 0 timeout.", }) ginkgo.By("update work status to avoid addon status update") @@ -407,8 +407,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "upgrading... 
work is not ready", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "progressing... work is not ready", }) } for i := 2; i < 4; i++ { @@ -440,8 +440,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -481,22 +481,22 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "2/4 upgrading..., 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "2/4 progressing..., 0 failed 0 timeout.", }) ginkgo.By("timeout after ProgressDeadline 5s and stop rollout since breach MaxFailures 1") assertClusterManagementAddOnNoConditions(testAddOnConfigsImpl.name, start, 5*time.Second, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "0/4 upgrading..., 0 failed 2 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "0/4 progressing..., 0 failed 2 timeout.", }) assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "0/4 upgrading..., 0 failed 2 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "0/4 progressing..., 0 failed 2 timeout.", }) ginkgo.By("update timeouted work status to continue rollout since within MaxFailures 1") @@ -534,8 +534,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -543,8 +543,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "4/4 upgrading..., 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "4/4 progressing..., 0 failed 0 timeout.", }) ginkgo.By("update another 2 work status to trigger addon status") @@ -582,8 +582,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed 
with no errors.", }) } ginkgo.By("check cma status") @@ -622,8 +622,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "4/4 upgrade completed with no errors, 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "4/4 completed with no errors, 0 failed 0 timeout.", }) ginkgo.By("update work status to avoid addon status update") @@ -679,8 +679,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "upgrading... work is not ready", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "progressing... work is not ready", }) } for i := 2; i < 4; i++ { @@ -712,8 +712,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -753,8 +753,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "2/4 upgrading..., 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "2/4 progressing..., 0 failed 0 timeout.", }) ginkgo.By("update 2 work status to trigger addon status") @@ -765,14 +765,14 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnNoConditions(testAddOnConfigsImpl.name, start, 3*time.Second, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "4/4 upgrading..., 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "4/4 progressing..., 0 failed 0 timeout.", }) assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionTrue, - Reason: addonapiv1alpha1.ProgressingReasonUpgrading, - Message: "4/4 upgrading..., 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonProgressing, + Message: "4/4 progressing..., 0 failed 0 timeout.", }) ginkgo.By("check mca status") @@ -805,8 +805,8 @@ var _ = ginkgo.Describe("Addon upgrade", func() { assertManagedClusterAddOnConditions(testAddOnConfigsImpl.name, clusterNames[i], metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "upgrade completed with no errors.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "completed with no errors.", }) } @@ -852,14 +852,14 @@ var _ = 
ginkgo.Describe("Addon upgrade", func() { assertClusterManagementAddOnNoConditions(testAddOnConfigsImpl.name, start, 3*time.Second, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "4/4 upgrade completed with no errors, 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "4/4 completed with no errors, 0 failed 0 timeout.", }) assertClusterManagementAddOnConditions(testAddOnConfigsImpl.name, metav1.Condition{ Type: addonapiv1alpha1.ManagedClusterAddOnConditionProgressing, Status: metav1.ConditionFalse, - Reason: addonapiv1alpha1.ProgressingReasonUpgradeSucceed, - Message: "4/4 upgrade completed with no errors, 0 failed 0 timeout.", + Reason: addonapiv1alpha1.ProgressingReasonCompleted, + Message: "4/4 completed with no errors, 0 failed 0 timeout.", }) ginkgo.By("check cma status") diff --git a/vendor/modules.txt b/vendor/modules.txt index 189154208..2c322bac4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1518,7 +1518,7 @@ open-cluster-management.io/addon-framework/pkg/basecontroller/events open-cluster-management.io/addon-framework/pkg/basecontroller/factory open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.13.1-0.20240521030453-9d94703b9eba +# open-cluster-management.io/api v0.13.1-0.20240605083248-f9e7f50520fc ## explicit; go 1.21 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml index 6a063e773..898dadbab 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml @@ -271,11 +271,14 @@ spec: server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False - with the reason of ApplyConflict. + with the reason of ApplyConflict. ReadOnly type means + the agent will only check the existence of the resource + based on its metadata. enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type diff --git a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go index abc458aad..9d7b11d11 100644 --- a/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go +++ b/vendor/open-cluster-management.io/api/addon/v1alpha1/types_managedclusteraddon.go @@ -415,29 +415,17 @@ const ( // the reason of condition ManagedClusterAddOnConditionProgressing const ( - // ProgressingReasonInstalling is the reason of condition Progressing indicating the addon configuration is - // installing. - ProgressingReasonInstalling = "Installing" + // ProgressingReasonProgressing is the reason of condition Progressing indicating the addon configuration is + // applying. 
+ ProgressingReasonProgressing = "Progressing" - // ProgressingReasonInstallSucceed is the reason of condition Progressing indicating the addon configuration is - // installed successfully. - ProgressingReasonInstallSucceed = "InstallSucceed" + // ProgressingReasonCompleted is the reason of condition Progressing indicating the addon configuration is + // applied successfully. + ProgressingReasonCompleted = "Completed" - // ProgressingReasonInstallFailed is the reason of condition Progressing indicating the addon configuration is - // installed failed. - ProgressingReasonInstallFailed = "InstallFailed" - - // ProgressingReasonUpgrading is the reason of condition Progressing indicating the addon configuration is - // upgrading. - ProgressingReasonUpgrading = "Upgrading" - - // ProgressingReasonUpgradeSucceed is the reason of condition Progressing indicating the addon configuration is - // upgraded successfully. - ProgressingReasonUpgradeSucceed = "UpgradeSucceed" - - // ProgressingReasonUpgradeFailed is the reason of condition Progressing indicating the addon configuration is - // upgraded failed. - ProgressingReasonUpgradeFailed = "UpgradeFailed" + // ProgressingReasonFailed is the reason of condition Progressing indicating the addon configuration + // failed to apply. + ProgressingReasonFailed = "Failed" // ProgressingReasonWaitingForCanary is the reason of condition Progressing indicating the addon configuration // upgrade is pending and waiting for canary is done. diff --git a/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml b/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml index c3b0133ec..f9152ba4d 100644 --- a/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml +++ b/vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml @@ -253,11 +253,14 @@ spec: means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the - status of False with the reason of ApplyConflict. + status of False with the reason of ApplyConflict. ReadOnly + type means the agent will only check the existence of + the resource based on its metadata. enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type diff --git a/vendor/open-cluster-management.io/api/work/v1/types.go b/vendor/open-cluster-management.io/api/work/v1/types.go index 39b8ec2b2..7ab0865a5 100644 --- a/vendor/open-cluster-management.io/api/work/v1/types.go +++ b/vendor/open-cluster-management.io/api/work/v1/types.go @@ -162,8 +162,9 @@ type UpdateStrategy struct { // ServerSideApply type means to update resource using server side apply with work-controller as the field manager. // If there is conflict, the related Applied condition of manifest will be in the status of False with the // reason of ApplyConflict. + // ReadOnly type means the agent will only check the existence of the resource based on its metadata. 
// +kubebuilder:default=Update - // +kubebuilder:validation:Enum=Update;CreateOnly;ServerSideApply + // +kubebuilder:validation:Enum=Update;CreateOnly;ServerSideApply;ReadOnly // +kubebuilder:validation:Required // +required Type UpdateStrategyType `json:"type,omitempty"` @@ -177,18 +178,23 @@ type UpdateStrategy struct { type UpdateStrategyType string const ( - // Update type means to update resource by an update call. + // UpdateStrategyTypeUpdate means to update resource by an update call. UpdateStrategyTypeUpdate UpdateStrategyType = "Update" - // CreateOnly type means do not update resource based on current manifest. This should be used only when + // UpdateStrategyTypeCreateOnly means do not update resource based on current manifest. This should be used only when // ServerSideApply type is not support on the spoke, and the user on hub would like some other controller // on the spoke to own the control of the resource. UpdateStrategyTypeCreateOnly UpdateStrategyType = "CreateOnly" - // ServerSideApply type means to update resource using server side apply with work-controller as the field manager. + // UpdateStrategyTypeServerSideApply means to update resource using server side apply with work-controller as the field manager. // If there is conflict, the related Applied condition of manifest will be in the status of False with the // reason of ApplyConflict. This type allows another controller on the spoke to control certain field of the resource. UpdateStrategyTypeServerSideApply UpdateStrategyType = "ServerSideApply" + + // UpdateStrategyTypeReadOnly type means only check the existence of the resource based on the resource's metadata. + // If the statusFeedBackRules are set, the feedbackResult will also be returned. + // The resource will not be removed when the type is ReadOnly, and only resource metadata is required. + UpdateStrategyTypeReadOnly UpdateStrategyType = "ReadOnly" ) type ServerSideApplyConfig struct { diff --git a/vendor/open-cluster-management.io/api/work/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/work/v1/zz_generated.swagger_doc_generated.go index 8689d32f8..000dc7f22 100644 --- a/vendor/open-cluster-management.io/api/work/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/work/v1/zz_generated.swagger_doc_generated.go @@ -287,7 +287,7 @@ func (StatusFeedbackResult) SwaggerDoc() map[string]string { var map_UpdateStrategy = map[string]string{ "": "UpdateStrategy defines the strategy to update this manifest", - "type": "type defines the strategy to update this manifest, default value is Update. Update type means to update resource by an update call. CreateOnly type means do not update resource based on current manifest. ServerSideApply type means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False with the reason of ApplyConflict.", + "type": "type defines the strategy to update this manifest, default value is Update. Update type means to update resource by an update call. CreateOnly type means do not update resource based on current manifest. ServerSideApply type means to update resource using server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False with the reason of ApplyConflict. 
ReadOnly type means the agent will only check the existence of the resource based on its metadata.", "serverSideApply": "serverSideApply defines the configuration for server side apply. It is honored only when type of updateStrategy is ServerSideApply", } diff --git a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml index f0d86d922..2e4f54966 100644 --- a/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml +++ b/vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml @@ -284,11 +284,14 @@ spec: server side apply with work-controller as the field manager. If there is conflict, the related Applied condition of manifest will be in the status of False - with the reason of ApplyConflict. + with the reason of ApplyConflict. ReadOnly type means + the agent will only check the existence of the resource + based on its metadata. enum: - Update - CreateOnly - ServerSideApply + - ReadOnly type: string required: - type
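The other half of the change collapses the Installing/Upgrading reason pairs on the ManagedClusterAddOn Progressing condition into the single Progressing/Completed/Failed set. A hedged sketch of how a consumer might read the consolidated reasons after this change, assuming the same api revision; `describeProgressing` is an illustrative helper, not code from this PR.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
)

// describeProgressing maps the consolidated Progressing reasons to a short
// rollout state string, mirroring how graph.go now derives rollout status.
func describeProgressing(addon *addonapiv1alpha1.ManagedClusterAddOn) string {
	cond := meta.FindStatusCondition(addon.Status.Conditions,
		addonapiv1alpha1.ManagedClusterAddOnConditionProgressing)
	if cond == nil {
		return "unknown"
	}
	switch cond.Reason {
	case addonapiv1alpha1.ProgressingReasonProgressing:
		return "progressing" // covers both install and upgrade after this change
	case addonapiv1alpha1.ProgressingReasonCompleted:
		return "completed"
	case addonapiv1alpha1.ProgressingReasonFailed:
		return "failed"
	default:
		return cond.Reason
	}
}

func main() {
	// With no conditions set, the helper reports "unknown".
	fmt.Println(describeProgressing(&addonapiv1alpha1.ManagedClusterAddOn{}))
}
```

Controllers or dashboards that previously matched ProgressingReasonInstalling, ProgressingReasonUpgrading, or the corresponding Succeed/Failed reasons would need to switch to the three new constants, as the updated tests and integration suite in this diff do.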