diff --git a/api/provisioning/v1alpha1/conditions.go b/api/provisioning/v1alpha1/conditions.go
new file mode 100644
index 00000000..e9f89a13
--- /dev/null
+++ b/api/provisioning/v1alpha1/conditions.go
@@ -0,0 +1,73 @@
+package v1alpha1
+
+// ConditionType is a string representing the condition's type
+type ConditionType string
+
+// The following constants define the different types of conditions that will be set for ClusterTemplate
+var CTconditionTypes = struct {
+	Validated ConditionType
+}{
+	Validated: "ClusterTemplateValidated",
+}
+
+// The following constants define the different types of conditions that will be set for ProvisioningRequest
+var PRconditionTypes = struct {
+	Validated                 ConditionType
+	HardwareTemplateRendered  ConditionType
+	HardwareProvisioned       ConditionType
+	HardwareNodeConfigApplied ConditionType
+	HardwareConfigured        ConditionType
+	ClusterInstanceRendered   ConditionType
+	ClusterResourcesCreated   ConditionType
+	ClusterInstanceProcessed  ConditionType
+	ClusterProvisioned        ConditionType
+	ConfigurationApplied      ConditionType
+	UpgradeCompleted          ConditionType
+}{
+	Validated:                 "ProvisioningRequestValidated",
+	HardwareTemplateRendered:  "HardwareTemplateRendered",
+	HardwareProvisioned:       "HardwareProvisioned",
+	HardwareNodeConfigApplied: "HardwareNodeConfigApplied",
+	HardwareConfigured:        "HardwareConfigured",
+	ClusterInstanceRendered:   "ClusterInstanceRendered",
+	ClusterResourcesCreated:   "ClusterResourcesCreated",
+	ClusterInstanceProcessed:  "ClusterInstanceProcessed",
+	ClusterProvisioned:        "ClusterProvisioned",
+	ConfigurationApplied:      "ConfigurationApplied",
+	UpgradeCompleted:          "UpgradeCompleted",
+}
+
+// ConditionReason is a string representing the condition's reason
+type ConditionReason string
+
+// The following constants define the different reasons that conditions will be set for ClusterTemplate
+var CTconditionReasons = struct {
+	Completed ConditionReason
+	Failed    ConditionReason
+}{
+	Completed: "Completed",
+	Failed:    "Failed",
+}
+
+// The following constants define the different reasons that conditions will be set for ProvisioningRequest
+var CRconditionReasons = struct {
+	NotApplied      ConditionReason
+	ClusterNotReady ConditionReason
+	Completed       ConditionReason
+	Failed          ConditionReason
+	InProgress      ConditionReason
+	Missing         ConditionReason
+	OutOfDate       ConditionReason
+	TimedOut        ConditionReason
+	Unknown         ConditionReason
+}{
+	NotApplied:      "NotApplied",
+	ClusterNotReady: "ClusterNotReady",
+	Completed:       "Completed",
+	Failed:          "Failed",
+	InProgress:      "InProgress",
+	Missing:         "Missing",
+	OutOfDate:       "OutOfDate",
+	TimedOut:        "TimedOut",
+	Unknown:         "Unknown",
+}
diff --git a/api/provisioning/v1alpha1/provisioningrequest_validation.go b/api/provisioning/v1alpha1/provisioningrequest_validation.go
index 691267d6..4b914ec6 100644
--- a/api/provisioning/v1alpha1/provisioningrequest_validation.go
+++ b/api/provisioning/v1alpha1/provisioningrequest_validation.go
@@ -244,7 +244,7 @@ func (r *ProvisioningRequest) GetClusterTemplateRef(ctx context.Context, client
 		if ct.Name == clusterTemplateRefName {
 			validatedCond := meta.FindStatusCondition(
 				ct.Status.Conditions,
-				"ClusterTemplateValidated") // TODO: consider exposing the conditions in the API
+				string(CTconditionTypes.Validated))
 			if validatedCond != nil && validatedCond.Status == metav1.ConditionTrue {
 				return &ct, nil
 			}
diff --git a/api/provisioning/v1alpha1/provisioningrequest_webhook.go b/api/provisioning/v1alpha1/provisioningrequest_webhook.go
index dd489ef1..c086b46e 100644
--- a/api/provisioning/v1alpha1/provisioningrequest_webhook.go
+++ b/api/provisioning/v1alpha1/provisioningrequest_webhook.go
@@ -122,9 +122,9 @@ func (r *ProvisioningRequest) validateCreateOrUpdate(oldPr *ProvisioningRequest)
 	// Once provisioning has started or reached a final state (Completed or Failed),
 	// updates to immutable fields in the ClusterInstance input are disallowed,
 	// with the exception of scaling up/down when Cluster provisioning is completed.
-	// TODO: consider exposing the conditions in the API.
-	crProvisionedCond := meta.FindStatusCondition(r.Status.Conditions, "ClusterProvisioned")
-	if crProvisionedCond != nil && crProvisionedCond.Reason != "Unknown" {
+	crProvisionedCond := meta.FindStatusCondition(
+		r.Status.Conditions, string(PRconditionTypes.ClusterProvisioned))
+	if crProvisionedCond != nil && crProvisionedCond.Reason != string(CRconditionReasons.Unknown) {
 		oldPrClusterInstanceInput, err := ExtractMatchingInput(
 			oldPr.Spec.TemplateParameters.Raw, TemplateParamClusterInstance)
 		if err != nil {
diff --git a/internal/controllers/clustertemplate_controller.go b/internal/controllers/clustertemplate_controller.go
index 2c683ac4..73c5dc7e 100644
--- a/internal/controllers/clustertemplate_controller.go
+++ b/internal/controllers/clustertemplate_controller.go
@@ -588,15 +588,15 @@ func checkSchemaContains(actual, expected map[string]any, currentPath string) er
 func (t *clusterTemplateReconcilerTask) updateStatusConditionValidated(ctx context.Context, errMsg string) error {
 	if errMsg != "" {
 		utils.SetStatusCondition(&t.object.Status.Conditions,
-			utils.CTconditionTypes.Validated,
-			utils.CTconditionReasons.Failed,
+			provisioningv1alpha1.CTconditionTypes.Validated,
+			provisioningv1alpha1.CTconditionReasons.Failed,
 			metav1.ConditionFalse,
 			errMsg,
 		)
 	} else {
 		utils.SetStatusCondition(&t.object.Status.Conditions,
-			utils.CTconditionTypes.Validated,
-			utils.CTconditionReasons.Completed,
+			provisioningv1alpha1.CTconditionTypes.Validated,
+			provisioningv1alpha1.CTconditionReasons.Completed,
 			metav1.ConditionTrue,
 			"The cluster template validation succeeded",
 		)
diff --git a/internal/controllers/clustertemplate_controller_test.go b/internal/controllers/clustertemplate_controller_test.go
index f706bbd4..94c53522 100644
--- a/internal/controllers/clustertemplate_controller_test.go
+++ b/internal/controllers/clustertemplate_controller_test.go
@@ -132,9 +132,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`,
 			Expect(c.Get(ctx, req.NamespacedName, updatedCT)).To(Succeed())
 			conditions := updatedCT.Status.Conditions
 			Expect(conditions).To(HaveLen(1))
-			Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated)))
+			Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated)))
 			Expect(conditions[0].Status).To(Equal(metav1.ConditionTrue))
-			Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Completed)))
+			Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Completed)))
 			Expect(conditions[0].Message).To(Equal("The cluster template validation succeeded"))
 		})
 
@@ -156,9 +156,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`,
 			Expect(c.Get(ctx, req.NamespacedName, updatedCT)).To(Succeed())
 			conditions := updatedCT.Status.Conditions
 			Expect(conditions).To(HaveLen(1))
-			Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated)))
+			Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated)))
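// Illustrative sketch, not part of the patch: the hunks above call utils.SetStatusCondition
// with the new typed constants, but that helper's definition is not shown in this diff.
// Judging only from its call sites, it is presumably a thin wrapper around
// k8s.io/apimachinery's meta.SetStatusCondition, roughly like the assumed version below.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Local mirrors of the typed names added in api/provisioning/v1alpha1/conditions.go.
type ConditionType string
type ConditionReason string

// setStatusCondition is a guessed equivalent of utils.SetStatusCondition:
// convert the typed constants to strings and delegate to meta.SetStatusCondition.
func setStatusCondition(conditions *[]metav1.Condition, condType ConditionType,
	reason ConditionReason, status metav1.ConditionStatus, message string) {
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:    string(condType),
		Status:  status,
		Reason:  string(reason),
		Message: message,
	})
}

func main() {
	var conditions []metav1.Condition
	setStatusCondition(&conditions, "ClusterTemplateValidated", "Completed",
		metav1.ConditionTrue, "The cluster template validation succeeded")
	// Readers such as GetClusterTemplateRef can then look the condition up by type.
	fmt.Println(meta.IsStatusConditionTrue(conditions, "ClusterTemplateValidated")) // true
}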
Expect(conditions[0].Status).To(Equal(metav1.ConditionFalse)) - Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Failed))) + Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Failed))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( "the ConfigMap '%s' is not found in the namespace '%s'", ciDefaultsCm, ctNamespace))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( @@ -517,9 +517,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`, // Check the status condition conditions := t.object.Status.Conditions Expect(conditions).To(HaveLen(1)) - Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated))) + Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated))) Expect(conditions[0].Status).To(Equal(metav1.ConditionTrue)) - Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Completed))) + Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Completed))) Expect(conditions[0].Message).To(Equal("The cluster template validation succeeded")) }) @@ -532,9 +532,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`, // Check the status condition conditions := t.object.Status.Conditions Expect(conditions).To(HaveLen(1)) - Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated))) + Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated))) Expect(conditions[0].Status).To(Equal(metav1.ConditionFalse)) - Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Failed))) + Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Failed))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( "the ConfigMap '%s' is not found in the namespace '%s'", ciDefaultsCm, ctNamespace))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( @@ -555,9 +555,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`, // Check the status condition conditions := t.object.Status.Conditions Expect(conditions).To(HaveLen(1)) - Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated))) + Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated))) Expect(conditions[0].Status).To(Equal(metav1.ConditionFalse)) - Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Failed))) + Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Failed))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( "the value of key %s from ConfigMap %s is not a valid duration string", utils.ClusterConfigurationTimeoutConfigKey, ptDefaultsCm))) Expect(conditions[0].Message).To(ContainSubstring(fmt.Sprintf( @@ -576,9 +576,9 @@ clustertemplate-a-policy-v1-defaultHugepagesSize: "1G"`, conditions := t.object.Status.Conditions Expect(conditions).To(HaveLen(1)) errMessage := fmt.Sprintf("the value of HardwareProvisioningTimeout from hardware template %s is not a valid duration string", hwtmpl.Name) - Expect(conditions[0].Type).To(Equal(string(utils.CTconditionTypes.Validated))) + Expect(conditions[0].Type).To(Equal(string(provisioningv1alpha1.CTconditionTypes.Validated))) Expect(conditions[0].Status).To(Equal(metav1.ConditionFalse)) - Expect(conditions[0].Reason).To(Equal(string(utils.CTconditionReasons.Failed))) + Expect(conditions[0].Reason).To(Equal(string(provisioningv1alpha1.CTconditionReasons.Failed))) 
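// Illustrative sketch, not part of the patch: the failure messages asserted above
// ("... is not a valid duration string") suggest the ClusterTemplate timeout values are
// validated by parsing them as Go durations. A minimal, assumed version of such a check;
// the key name used here is hypothetical, not the real ClusterConfigurationTimeoutConfigKey value.

package main

import (
	"fmt"
	"time"
)

// validateTimeoutString parses a ConfigMap value such as "90m" or "1h30m".
func validateTimeoutString(key, value string) (time.Duration, error) {
	d, err := time.ParseDuration(value)
	if err != nil {
		return 0, fmt.Errorf("the value of key %s is not a valid duration string: %w", key, err)
	}
	return d, nil
}

func main() {
	if _, err := validateTimeoutString("clusterConfigurationTimeout", "40mins"); err != nil {
		fmt.Println(err) // "40mins" is not a valid Go duration; "40m" would be accepted
	}
}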
Expect(conditions[0].Message).To(ContainSubstring(errMessage)) // Check the HardwareTemplate status condition diff --git a/internal/controllers/provisioningrequest_clusterconfig.go b/internal/controllers/provisioningrequest_clusterconfig.go index 31b22866..e67d7dcc 100644 --- a/internal/controllers/provisioningrequest_clusterconfig.go +++ b/internal/controllers/provisioningrequest_clusterconfig.go @@ -102,8 +102,8 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( if len(targetPolicies) == 0 { t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Time{} utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.Missing, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.Missing, metav1.ConditionFalse, "No configuration present", ) @@ -114,8 +114,8 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( if allPoliciesCompliant { t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Time{} utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "The configuration is up to date", ) @@ -140,8 +140,8 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( ), ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.ClusterNotReady, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.ClusterNotReady, metav1.ConditionFalse, "The Cluster is not yet ready", ) @@ -157,8 +157,8 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( // No timeout is computed if all policies are in inform, just out of date. t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Time{} utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.OutOfDate, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.OutOfDate, metav1.ConditionFalse, "The configuration is out of date", ) @@ -166,17 +166,17 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( policyConfigTimedOut = t.hasPolicyConfigurationTimedOut(ctx) message := "The configuration is still being applied" - reason := utils.CRconditionReasons.InProgress + reason := provisioningv1alpha1.CRconditionReasons.InProgress utils.SetProvisioningStateInProgress(t.object, "Cluster configuration is being applied") if policyConfigTimedOut { message += ", but it timed out" - reason = utils.CRconditionReasons.TimedOut + reason = provisioningv1alpha1.CRconditionReasons.TimedOut utils.SetProvisioningStateFailed(t.object, "Cluster configuration timed out") } utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, reason, metav1.ConditionFalse, message, @@ -189,7 +189,7 @@ func (t *provisioningRequestReconcilerTask) updateConfigurationAppliedStatus( // updateZTPStatus updates status.ClusterDetails.ZtpStatus. 
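// Illustrative sketch, not part of the patch: a condensed restatement of how
// updateConfigurationAppliedStatus above appears to pick the ConfigurationApplied reason.
// Simplified on purpose; the real function also updates NonCompliantAt, the provisioning
// state, and the condition status (only Completed is set to ConditionTrue).

package main

import "fmt"

type ConditionReason string

// policyState mirrors the branches taken in updateConfigurationAppliedStatus; field
// names are assumptions for illustration only.
type policyState struct {
	havePolicies          bool // any policies matched the cluster
	allCompliant          bool
	clusterReady          bool // managed cluster is available/joined/accepted
	allNonCompliantInform bool // only inform policies are out of date
	timedOut              bool
}

func configurationAppliedReason(s policyState) ConditionReason {
	switch {
	case !s.havePolicies:
		return "Missing"
	case s.allCompliant:
		return "Completed"
	case !s.clusterReady:
		return "ClusterNotReady"
	case s.allNonCompliantInform:
		return "OutOfDate"
	case s.timedOut:
		return "TimedOut"
	default:
		return "InProgress"
	}
}

func main() {
	fmt.Println(configurationAppliedReason(policyState{havePolicies: true, clusterReady: true}))
	// InProgress
}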
func (t *provisioningRequestReconcilerTask) updateZTPStatus(ctx context.Context, allPoliciesCompliant bool) error { // Check if the cluster provision has started. - crProvisionedCond := meta.FindStatusCondition(t.object.Status.Conditions, string(utils.PRconditionTypes.ClusterProvisioned)) + crProvisionedCond := meta.FindStatusCondition(t.object.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) if crProvisionedCond != nil { // If the provisioning has started, and the ZTP status is empty or not done. if t.object.Status.Extensions.ClusterDetails.ZtpStatus != utils.ClusterZtpDone { @@ -254,7 +254,7 @@ func (t *provisioningRequestReconcilerTask) hasPolicyConfigurationTimedOut(ctx c // Get the ConfigurationApplied condition. configurationAppliedCondition := meta.FindStatusCondition( t.object.Status.Conditions, - string(utils.PRconditionTypes.ConfigurationApplied)) + string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) // If the condition does not exist, set the non compliant timestamp since we // get here just for policies that have a status different from Compliant. @@ -266,7 +266,7 @@ func (t *provisioningRequestReconcilerTask) hasPolicyConfigurationTimedOut(ctx c // If the current status of the Condition is false. if configurationAppliedCondition.Status == metav1.ConditionFalse { switch configurationAppliedCondition.Reason { - case string(utils.CRconditionReasons.InProgress): + case string(provisioningv1alpha1.CRconditionReasons.InProgress): // Check if the configuration application has timed out. if t.object.Status.Extensions.ClusterDetails.NonCompliantAt.IsZero() { t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Now() @@ -276,13 +276,13 @@ func (t *provisioningRequestReconcilerTask) hasPolicyConfigurationTimedOut(ctx c t.object.Status.Extensions.ClusterDetails.NonCompliantAt.Time, t.timeouts.clusterConfiguration) } - case string(utils.CRconditionReasons.TimedOut): + case string(provisioningv1alpha1.CRconditionReasons.TimedOut): policyTimedOut = true - case string(utils.CRconditionReasons.Missing): + case string(provisioningv1alpha1.CRconditionReasons.Missing): t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Now() - case string(utils.CRconditionReasons.OutOfDate): + case string(provisioningv1alpha1.CRconditionReasons.OutOfDate): t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Now() - case string(utils.CRconditionReasons.ClusterNotReady): + case string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady): // The cluster might not be ready because its being initially provisioned or // there are problems after provisionion, so it might be that NonCompliantAt // has been previously set. 
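// Illustrative sketch, not part of the patch: the InProgress branch above stamps
// NonCompliantAt the first time a non-compliant enforce policy is seen and then compares
// the elapsed time against the configured cluster-configuration timeout. The helper that
// performs that comparison is not visible in this diff, so the check below is an assumed
// equivalent, shown together with the back-dating trick the tests use to force a timeout.

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// timeoutExceeded: true once `timeout` has elapsed since the recorded NonCompliantAt time.
func timeoutExceeded(nonCompliantAt time.Time, timeout time.Duration) bool {
	return time.Since(nonCompliantAt) > timeout
}

func main() {
	// First reconcile that sees a NonCompliant enforce policy stamps the time.
	nonCompliantAt := metav1.Now()
	fmt.Println(timeoutExceeded(nonCompliantAt.Time, time.Minute)) // false right away

	// The specs back-date the stamp (e.g. by 2 minutes) so it exceeds the configured timeout.
	backdated := metav1.NewTime(time.Now().Add(-2 * time.Minute))
	fmt.Println(timeoutExceeded(backdated.Time, time.Minute)) // true
}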
@@ -295,11 +295,11 @@ func (t *provisioningRequestReconcilerTask) hasPolicyConfigurationTimedOut(ctx c default: t.logger.InfoContext(ctx, fmt.Sprintf("Unexpected Reason for condition type %s", - utils.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, ), ) } - } else if configurationAppliedCondition.Reason == string(utils.CRconditionReasons.Completed) { + } else if configurationAppliedCondition.Reason == string(provisioningv1alpha1.CRconditionReasons.Completed) { t.object.Status.Extensions.ClusterDetails.NonCompliantAt = metav1.Now() } diff --git a/internal/controllers/provisioningrequest_clusterconfig_test.go b/internal/controllers/provisioningrequest_clusterconfig_test.go index 21815cc8..774437f8 100644 --- a/internal/controllers/provisioningrequest_clusterconfig_test.go +++ b/internal/controllers/provisioningrequest_clusterconfig_test.go @@ -67,8 +67,8 @@ var _ = Describe("policyManagement", func() { Status: provisioningv1alpha1.ClusterTemplateStatus{ Conditions: []metav1.Condition{ { - Type: string(utils.CTconditionTypes.Validated), - Reason: string(utils.CTconditionReasons.Completed), + Type: string(provisioningv1alpha1.CTconditionTypes.Validated), + Reason: string(provisioningv1alpha1.CTconditionReasons.Completed), Status: metav1.ConditionTrue, }, }, @@ -173,7 +173,7 @@ defaultHugepagesSize: "1G"`, // Fake the hw provision status Conditions: []metav1.Condition{ { - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionTrue, }, }, @@ -266,19 +266,19 @@ defaultHugepagesSize: "1G"`, Expect(err).ToNot(HaveOccurred()) Expect(managedClusterExists).To(BeTrue()) utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionTrue, "Managed cluster is available", ) utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionHubAccepted), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionHubAccepted), "HubClusterAdminAccepted", metav1.ConditionTrue, "Accepted by hub cluster admin", ) utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionJoined), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionJoined), "ManagedClusterJoined", metav1.ConditionTrue, "Managed cluster joined", @@ -369,7 +369,7 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).To(BeNil()) // Add the ClusterProvisioned condition. @@ -392,8 +392,8 @@ defaultHugepagesSize: "1G"`, } utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ClusterProvisioned, - utils.CRconditionReasons.InProgress, + provisioningv1alpha1.PRconditionTypes.ClusterProvisioned, + provisioningv1alpha1.CRconditionReasons.InProgress, metav1.ConditionFalse, "", ) @@ -462,8 +462,8 @@ defaultHugepagesSize: "1G"`, // Update the ProvisioningRequest ConfigurationApplied condition to TimedOut. 
utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.TimedOut, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.TimedOut, metav1.ConditionFalse, "The configuration is still being applied, but it timed out", ) @@ -535,11 +535,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionTrue)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.Completed))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.Completed))) Expect(configAppliedCond.Message).To(Equal("The configuration is up to date")) }) @@ -640,11 +640,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) Expect(configAppliedCond.Message).To(Equal("The configuration is still being applied")) // Take 2 minutes to the NonCompliantAt timestamp to mock timeout. @@ -662,11 +662,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.TimedOut))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.TimedOut))) Expect(configAppliedCond.Message).To( Equal("The configuration is still being applied, but it timed out")) @@ -688,11 +688,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. 
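// Illustrative sketch, not part of the patch: the specs above repeat the same four
// assertions for every condition check. A small helper in the spirit of the
// verifyStatusCondition helper used by the controller tests later in this diff could
// reduce that repetition; the helper below is an editorial suggestion, not existing code.

package controllers

import (
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// expectCondition bundles the repeated type/status/reason/message assertions.
func expectCondition(cond *metav1.Condition, condType string,
	status metav1.ConditionStatus, reason, message string) {
	Expect(cond).ToNot(BeNil())
	Expect(cond.Type).To(Equal(condType))
	Expect(cond.Status).To(Equal(status))
	Expect(cond.Reason).To(Equal(reason))
	if message != "" {
		Expect(cond.Message).To(Equal(message))
	}
}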
conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.OutOfDate))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.OutOfDate))) Expect(configAppliedCond.Message).To( Equal("The configuration is out of date")) }) @@ -788,11 +788,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) // Step 2: Update the managed cluster to make it not ready. managedCluster1 := &clusterv1.ManagedCluster{} @@ -800,7 +800,7 @@ defaultHugepagesSize: "1G"`, Expect(err).ToNot(HaveOccurred()) Expect(managedClusterExists).To(BeTrue()) utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionFalse, "Managed cluster is not available", @@ -827,15 +827,15 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.ClusterNotReady))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady))) // Step 3: Update the managed cluster to make it ready again. utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionTrue, "Managed cluster is available", @@ -862,11 +862,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. 
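// Illustrative sketch, not part of the patch: the specs above flip
// ManagedClusterConditionAvailable to drive the ClusterNotReady reason. The readiness
// check used by the controller is not shown in this diff; it presumably inspects the
// standard ManagedCluster conditions roughly as below (assumed logic).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "open-cluster-management.io/api/cluster/v1"
)

// isClusterReady: the ManagedCluster must be hub-accepted, joined and available.
func isClusterReady(mc *clusterv1.ManagedCluster) bool {
	return meta.IsStatusConditionTrue(mc.Status.Conditions, clusterv1.ManagedClusterConditionHubAccepted) &&
		meta.IsStatusConditionTrue(mc.Status.Conditions, clusterv1.ManagedClusterConditionJoined) &&
		meta.IsStatusConditionTrue(mc.Status.Conditions, clusterv1.ManagedClusterConditionAvailable)
}

func main() {
	mc := &clusterv1.ManagedCluster{}
	meta.SetStatusCondition(&mc.Status.Conditions, metav1.Condition{
		Type:   clusterv1.ManagedClusterConditionAvailable,
		Status: metav1.ConditionTrue,
		Reason: "ManagedClusterAvailable",
	})
	fmt.Println(isClusterReady(mc)) // false: HubAccepted and Joined are not set
}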
conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) }) It("It sets ClusterNotReady if the cluster is unstable/not ready", func() { @@ -904,7 +904,7 @@ defaultHugepagesSize: "1G"`, Expect(err).ToNot(HaveOccurred()) Expect(managedClusterExists).To(BeTrue()) utils.SetStatusCondition(&managedCluster1.Status.Conditions, - utils.ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionFalse, "Managed cluster is not available", @@ -953,11 +953,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.ClusterNotReady))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady))) }) It("Sets the NonCompliantAt timestamp and times out", func() { @@ -1067,11 +1067,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.OutOfDate))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.OutOfDate))) Expect(configAppliedCond.Message).To(Equal("The configuration is out of date")) // Enforce the NonCompliant policy. @@ -1115,11 +1115,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. 
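// Illustrative sketch, not part of the patch: the OutOfDate vs InProgress outcomes
// exercised above hinge on whether any NonCompliant policy is actually being enforced.
// A simplified classification, using a local stand-in for the ACM Policy fields involved
// (field names and values here are assumptions for illustration).

package main

import "fmt"

type policy struct {
	remediationAction string // "inform" or "enforce"
	complianceState   string // "Compliant" or "NonCompliant"
}

// nonCompliantPolicyInEnforce reports whether any non-compliant policy is being enforced;
// if none is, the configuration is only considered OutOfDate and no timeout is tracked.
func nonCompliantPolicyInEnforce(policies []policy) bool {
	for _, p := range policies {
		if p.complianceState != "Compliant" && p.remediationAction == "enforce" {
			return true
		}
	}
	return false
}

func main() {
	policies := []policy{
		{remediationAction: "inform", complianceState: "NonCompliant"},
	}
	fmt.Println(nonCompliantPolicyInEnforce(policies)) // false, so the reason stays OutOfDate
}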
conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) Expect(configAppliedCond.Message).To(Equal("The configuration is still being applied")) // Take 2 minutes to the NonCompliantAt timestamp to mock timeout. @@ -1137,11 +1137,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.TimedOut))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.TimedOut))) Expect(configAppliedCond.Message).To( Equal("The configuration is still being applied, but it timed out")) @@ -1156,11 +1156,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.TimedOut))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.TimedOut))) Expect(configAppliedCond.Message).To( Equal("The configuration is still being applied, but it timed out")) }) @@ -1267,11 +1267,11 @@ defaultHugepagesSize: "1G"`, // Verify that the ConfigurationApplied condition is set to InProgress. 
conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) Expect(configAppliedCond.Message).To(Equal("The configuration is still being applied")) // Take 2 minutes to the NonCompliantAt timestamp to mock timeout. @@ -1290,11 +1290,11 @@ defaultHugepagesSize: "1G"`, // Verify that the ConfigurationApplied condition is set to TimedOut. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.TimedOut))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.TimedOut))) Expect(configAppliedCond.Message).To( Equal("The configuration is still being applied, but it timed out")) @@ -1309,11 +1309,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions = CRTask.object.Status.Conditions configAppliedCond = meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.TimedOut))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.TimedOut))) Expect(configAppliedCond.Message).To( Equal("The configuration is still being applied, but it timed out")) }) @@ -1361,11 +1361,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. 
conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.Missing))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.Missing))) }) It("It handles updated/deleted policies for matched clusters", func() { @@ -1583,11 +1583,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionTrue)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.Completed))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.Completed))) Expect(configAppliedCond.Message).To(Equal("The configuration is up to date")) }) @@ -1690,11 +1690,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) Expect(configAppliedCond.Message).To(Equal("The configuration is still being applied")) }) @@ -1797,11 +1797,11 @@ defaultHugepagesSize: "1G"`, // Check the status conditions. 
conditions := CRTask.object.Status.Conditions configAppliedCond := meta.FindStatusCondition( - conditions, string(utils.PRconditionTypes.ConfigurationApplied)) + conditions, string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied)) Expect(configAppliedCond).ToNot(BeNil()) - Expect(configAppliedCond.Type).To(Equal(string(utils.PRconditionTypes.ConfigurationApplied))) + Expect(configAppliedCond.Type).To(Equal(string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied))) Expect(configAppliedCond.Status).To(Equal(metav1.ConditionFalse)) - Expect(configAppliedCond.Reason).To(Equal(string(utils.CRconditionReasons.InProgress))) + Expect(configAppliedCond.Reason).To(Equal(string(provisioningv1alpha1.CRconditionReasons.InProgress))) Expect(configAppliedCond.Message).To(Equal("The configuration is still being applied")) }) }) @@ -1885,8 +1885,8 @@ var _ = Describe("hasPolicyConfigurationTimedOut", func() { It("Returns false if the status is unexpected and NonCompliantAt is not set", func() { // Set the status to InProgress. utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.Unknown, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.Unknown, metav1.ConditionFalse, "", ) @@ -1901,8 +1901,8 @@ var _ = Describe("hasPolicyConfigurationTimedOut", func() { It("Returns false if the status is Completed and sets NonCompliantAt", func() { // Set the status to InProgress. utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "", ) @@ -1917,8 +1917,8 @@ var _ = Describe("hasPolicyConfigurationTimedOut", func() { It("Returns false if the status is OutOfDate and sets NonCompliantAt", func() { // Set the status to InProgress. utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.OutOfDate, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.OutOfDate, metav1.ConditionFalse, "", ) @@ -1933,8 +1933,8 @@ var _ = Describe("hasPolicyConfigurationTimedOut", func() { It("Returns false if the status is Missing and sets NonCompliantAt", func() { // Set the status to InProgress. utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.Missing, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.Missing, metav1.ConditionFalse, "", ) @@ -1949,8 +1949,8 @@ var _ = Describe("hasPolicyConfigurationTimedOut", func() { It("Returns true if the status is InProgress and the timeout has passed", func() { // Set the status to InProgress. 
utils.SetStatusCondition(&CRTask.object.Status.Conditions, - utils.PRconditionTypes.ConfigurationApplied, - utils.CRconditionReasons.InProgress, + provisioningv1alpha1.PRconditionTypes.ConfigurationApplied, + provisioningv1alpha1.CRconditionReasons.InProgress, metav1.ConditionFalse, "", ) diff --git a/internal/controllers/provisioningrequest_clusterinstall.go b/internal/controllers/provisioningrequest_clusterinstall.go index 128f8fbc..d9db61e6 100644 --- a/internal/controllers/provisioningrequest_clusterinstall.go +++ b/internal/controllers/provisioningrequest_clusterinstall.go @@ -62,8 +62,8 @@ func (t *provisioningRequestReconcilerTask) renderClusterInstanceTemplate( // updates to immutable fields in the ClusterInstance spec are disallowed, // with the exception of scaling up/down when Cluster provisioning is completed. crProvisionedCond := meta.FindStatusCondition(t.object.Status.Conditions, - string(utils.PRconditionTypes.ClusterProvisioned)) - if crProvisionedCond != nil && crProvisionedCond.Reason != string(utils.CRconditionReasons.Unknown) { + string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) + if crProvisionedCond != nil && crProvisionedCond.Reason != string(provisioningv1alpha1.CRconditionReasons.Unknown) { existingClusterInstance := &unstructured.Unstructured{} existingClusterInstance.SetGroupVersionKind( renderedClusterInstanceUnstructure.GroupVersionKind()) @@ -96,7 +96,7 @@ func (t *provisioningRequestReconcilerTask) renderClusterInstanceTemplate( for _, updatedField := range updatedFields { // Suppress install manifests to prevent unnecessary updates if updatedField == "clusterImageSetNameRef" && - crProvisionedCond.Reason == string(utils.CRconditionReasons.Completed) { + crProvisionedCond.Reason == string(provisioningv1alpha1.CRconditionReasons.Completed) { for _, crd := range utils.CRDsToBeSuppressedForUpgrade { if !slices.Contains(suppressedManifests, crd) { suppressedManifests = append(suppressedManifests, crd) @@ -107,7 +107,7 @@ func (t *provisioningRequestReconcilerTask) renderClusterInstanceTemplate( } } if len(scalingNodes) != 0 && - crProvisionedCond.Reason != string(utils.CRconditionReasons.Completed) { + crProvisionedCond.Reason != string(provisioningv1alpha1.CRconditionReasons.Completed) { // In-progress || Failed disallowedChanges = append(disallowedChanges, scalingNodes...) 
} @@ -278,8 +278,8 @@ func (t *provisioningRequestReconcilerTask) updateClusterInstanceProcessedStatus if len(ci.Status.Conditions) == 0 { message := fmt.Sprintf("Waiting for ClusterInstance (%s) to be processed", ci.Name) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterInstanceProcessed, - utils.CRconditionReasons.Unknown, + provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed, + provisioningv1alpha1.CRconditionReasons.Unknown, metav1.ConditionUnknown, message, ) @@ -291,8 +291,8 @@ func (t *provisioningRequestReconcilerTask) updateClusterInstanceProcessedStatus ciCondition := meta.FindStatusCondition(ci.Status.Conditions, string(condType)) if ciCondition != nil && ciCondition.Status != metav1.ConditionTrue { utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterInstanceProcessed, - utils.ConditionReason(ciCondition.Reason), + provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed, + provisioningv1alpha1.ConditionReason(ciCondition.Reason), ciCondition.Status, ciCondition.Message, ) @@ -302,8 +302,8 @@ func (t *provisioningRequestReconcilerTask) updateClusterInstanceProcessedStatus } utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterInstanceProcessed, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, fmt.Sprintf("Applied and processed ClusterInstance (%s) successfully", ci.Name), ) @@ -320,12 +320,12 @@ func (t *provisioningRequestReconcilerTask) updateClusterProvisionStatus(ci *sit if ciProvisionedCondition == nil { crClusterInstanceProcessedCond := meta.FindStatusCondition( - t.object.Status.Conditions, string(utils.PRconditionTypes.ClusterInstanceProcessed)) + t.object.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed)) if crClusterInstanceProcessedCond != nil && crClusterInstanceProcessedCond.Status == metav1.ConditionTrue { message := "Waiting for cluster installation to start" utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterProvisioned, - utils.CRconditionReasons.Unknown, + provisioningv1alpha1.PRconditionTypes.ClusterProvisioned, + provisioningv1alpha1.CRconditionReasons.Unknown, metav1.ConditionUnknown, message, ) @@ -333,8 +333,8 @@ func (t *provisioningRequestReconcilerTask) updateClusterProvisionStatus(ci *sit } } else { utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterProvisioned, - utils.ConditionReason(ciProvisionedCondition.Reason), + provisioningv1alpha1.PRconditionTypes.ClusterProvisioned, + provisioningv1alpha1.ConditionReason(ciProvisionedCondition.Reason), ciProvisionedCondition.Status, ciProvisionedCondition.Message, ) @@ -356,8 +356,8 @@ func (t *provisioningRequestReconcilerTask) updateClusterProvisionStatus(ci *sit // timed out message := "Cluster installation timed out" utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterProvisioned, - utils.CRconditionReasons.TimedOut, + provisioningv1alpha1.PRconditionTypes.ClusterProvisioned, + provisioningv1alpha1.CRconditionReasons.TimedOut, metav1.ConditionFalse, message, ) diff --git a/internal/controllers/provisioningrequest_clusterinstall_test.go b/internal/controllers/provisioningrequest_clusterinstall_test.go index 510d6061..19fe3655 100644 --- a/internal/controllers/provisioningrequest_clusterinstall_test.go +++ 
b/internal/controllers/provisioningrequest_clusterinstall_test.go @@ -122,12 +122,12 @@ nodes: // Check if status condition was updated correctly cond := meta.FindStatusCondition(task.object.Status.Conditions, - string(utils.PRconditionTypes.ClusterInstanceRendered)) + string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "ClusterInstance rendered and passed dry-run validation", }) }) @@ -140,12 +140,12 @@ nodes: // Check if status condition was updated correctly cond := meta.FindStatusCondition(task.object.Status.Conditions, - string(utils.PRconditionTypes.ClusterInstanceRendered)) + string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "spec.clusterName cannot be empty", }) }) @@ -154,9 +154,9 @@ nodes: // Simulate that the ClusterInstance has been provisioned task.object.Status.Conditions = []metav1.Condition{ { - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }, } @@ -191,12 +191,12 @@ nodes: // does not go through the API server. As a result, fields with empty values like false or "" are // stripped from the retrieved ClusterInstance CR (existing ClusterInstance) in the fakeclient. cond := meta.FindStatusCondition(task.object.Status.Conditions, - string(utils.PRconditionTypes.ClusterInstanceRendered)) + string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "Failed to render and validate ClusterInstance: detected changes in immutable fields", }) }) diff --git a/internal/controllers/provisioningrequest_controller.go b/internal/controllers/provisioningrequest_controller.go index 0d326b65..9cc980a7 100644 --- a/internal/controllers/provisioningrequest_controller.go +++ b/internal/controllers/provisioningrequest_controller.go @@ -380,11 +380,11 @@ func (t *provisioningRequestReconcilerTask) checkClusterDeployConfigState(ctx co // checkResourcePreparationStatus checks for validation and preparation failures, setting the // provisioningState to failed if no provisioning is currently in progress and issues are found. 
func (t *provisioningRequestReconcilerTask) checkResourcePreparationStatus(ctx context.Context) error { - conditionTypes := []utils.ConditionType{ - utils.PRconditionTypes.Validated, - utils.PRconditionTypes.ClusterInstanceRendered, - utils.PRconditionTypes.ClusterResourcesCreated, - utils.PRconditionTypes.HardwareTemplateRendered, + conditionTypes := []provisioningv1alpha1.ConditionType{ + provisioningv1alpha1.PRconditionTypes.Validated, + provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered, + provisioningv1alpha1.PRconditionTypes.ClusterResourcesCreated, + provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered, } for _, condType := range conditionTypes { @@ -413,8 +413,8 @@ func (t *provisioningRequestReconcilerTask) handleValidation(ctx context.Context slog.String("error", err.Error()), ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.Validated, - utils.CRconditionReasons.Failed, + provisioningv1alpha1.PRconditionTypes.Validated, + provisioningv1alpha1.CRconditionReasons.Failed, metav1.ConditionFalse, "Failed to validate the ProvisioningRequest: "+err.Error(), ) @@ -425,8 +425,8 @@ func (t *provisioningRequestReconcilerTask) handleValidation(ctx context.Context slog.String("name", t.object.Name), ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.Validated, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.Validated, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "The provisioning request validation succeeded", ) @@ -450,8 +450,8 @@ func (t *provisioningRequestReconcilerTask) handleRenderClusterInstance(ctx cont slog.String("error", err.Error()), ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterInstanceRendered, - utils.CRconditionReasons.Failed, + provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered, + provisioningv1alpha1.CRconditionReasons.Failed, metav1.ConditionFalse, "Failed to render and validate ClusterInstance: "+err.Error(), ) @@ -463,8 +463,8 @@ func (t *provisioningRequestReconcilerTask) handleRenderClusterInstance(ctx cont ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterInstanceRendered, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "ClusterInstance rendered and passed dry-run validation", ) @@ -491,8 +491,8 @@ func (t *provisioningRequestReconcilerTask) handleClusterResources(ctx context.C ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterResourcesCreated, - utils.CRconditionReasons.Failed, + provisioningv1alpha1.PRconditionTypes.ClusterResourcesCreated, + provisioningv1alpha1.CRconditionReasons.Failed, metav1.ConditionFalse, "Failed to apply the required cluster resource: "+err.Error(), ) @@ -504,8 +504,8 @@ func (t *provisioningRequestReconcilerTask) handleClusterResources(ctx context.C ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.ClusterResourcesCreated, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.ClusterResourcesCreated, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "Cluster resources applied", ) @@ -529,8 +529,8 @@ func (t *provisioningRequestReconcilerTask) renderHardwareTemplate(ctx context.C ) utils.SetStatusCondition(&t.object.Status.Conditions, - 
utils.PRconditionTypes.HardwareTemplateRendered, - utils.CRconditionReasons.Failed, + provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered, + provisioningv1alpha1.CRconditionReasons.Failed, metav1.ConditionFalse, "Failed to render the Hardware template: "+err.Error(), ) @@ -542,8 +542,8 @@ func (t *provisioningRequestReconcilerTask) renderHardwareTemplate(ctx context.C ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.HardwareTemplateRendered, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "Rendered Hardware template successfully", ) diff --git a/internal/controllers/provisioningrequest_controller_test.go b/internal/controllers/provisioningrequest_controller_test.go index d4ab9aec..16aa9aae 100644 --- a/internal/controllers/provisioningrequest_controller_test.go +++ b/internal/controllers/provisioningrequest_controller_test.go @@ -571,8 +571,8 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Status: provisioningv1alpha1.ClusterTemplateStatus{ Conditions: []metav1.Condition{ { - Type: string(utils.CTconditionTypes.Validated), - Reason: string(utils.CTconditionReasons.Completed), + Type: string(provisioningv1alpha1.CTconditionTypes.Validated), + Reason: string(provisioningv1alpha1.CTconditionReasons.Completed), Status: metav1.ConditionTrue, }, }, @@ -613,9 +613,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { It("Verify status conditions if ProvisioningRequest validation fails", func() { // Fail the ClusterTemplate validation ctValidatedCond := meta.FindStatusCondition( - ct.Status.Conditions, string(utils.CTconditionTypes.Validated)) + ct.Status.Conditions, string(provisioningv1alpha1.CTconditionTypes.Validated)) ctValidatedCond.Status = metav1.ConditionFalse - ctValidatedCond.Reason = string(utils.CTconditionReasons.Failed) + ctValidatedCond.Reason = string(provisioningv1alpha1.CTconditionReasons.Failed) Expect(c.Status().Update(ctx, ct)).To(Succeed()) // Start reconciliation @@ -631,9 +631,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(1)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: fmt.Sprintf( "Failed to validate the ProvisioningRequest: failed to get the ClusterTemplate for "+ "ProvisioningRequest cluster-1: a valid ClusterTemplate (%s) does not exist in any namespace", @@ -661,14 +661,14 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(2)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[1], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionFalse, - Reason: 
string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "spec.nodes[0].templateRefs must be provided", }) @@ -697,19 +697,19 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(3)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[1], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[2], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterResourcesCreated), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterResourcesCreated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "failed to create pull Secret for cluster cluster-1", }) @@ -737,29 +737,29 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[1], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[2], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterResourcesCreated), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterResourcesCreated), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[3], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareTemplateRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionUnknown, - Reason: string(utils.CRconditionReasons.Unknown), + Reason: string(provisioningv1alpha1.CRconditionReasons.Unknown), }) // Verify the start timestamp has been set for HardwareProvisioning Expect(reconciledCR.Status.Extensions.NodePoolRef.HardwareProvisioningCheckStart).ToNot(BeZero()) @@ -813,9 +813,9 @@ var _ = 
Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), }) // Verify the start timestamp has been set for HardwareProvisioning Expect(reconciledCR.Status.Extensions.NodePoolRef.HardwareProvisioningCheckStart).ToNot(BeZero()) @@ -851,19 +851,19 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(7)) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[5], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareNodeConfigApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareNodeConfigApplied), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[6], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceProcessed), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed), Status: metav1.ConditionUnknown, - Reason: string(utils.CRconditionReasons.Unknown), + Reason: string(provisioningv1alpha1.CRconditionReasons.Unknown), }) // Verify provisioningState is still progressing when nodePool is provisioned and clusterInstance is created verifyProvisioningStatus(reconciledCR.Status.ProvisioningStatus, @@ -892,9 +892,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), }) // Verify the provisioningState moves to failed when HW provisioning fails verifyProvisioningStatus(reconciledCR.Status.ProvisioningStatus, @@ -928,9 +928,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.TimedOut), + Reason: string(provisioningv1alpha1.CRconditionReasons.TimedOut), }) // Verify the provisioningState moves to failed when HW provisioning times out @@ -970,15 +970,15 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // has changed to Completed Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + 
Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "nodes.0: hostName is required", }) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) // Verify the provisioningState moves to failed verifyProvisioningStatus(reconciledCR.Status.ProvisioningStatus, @@ -1018,15 +1018,15 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // has changed to Completed Expect(len(conditions)).To(Equal(5)) verifyStatusCondition(conditions[1], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "spec.nodes[0].templateRefs must be provided", }) verifyStatusCondition(conditions[4], metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) // Verify the provisioningState moves to failed verifyProvisioningStatus(reconciledCR.Status.ProvisioningStatus, @@ -1126,20 +1126,20 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[6], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceProcessed), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceProcessed), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "Provisioning cluster", }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.ClusterNotReady), + Reason: string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady), Message: "The Cluster is not yet ready", }) @@ -1184,14 +1184,14 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.TimedOut), + Reason: string(provisioningv1alpha1.CRconditionReasons.TimedOut), }) 
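The verifyStatusCondition helper exercised by these specs is not shown in the hunks here; a minimal, self-contained sketch of the kind of check it implies, with the expected values built from the relocated constants (the helper name and the exact fields compared are assumptions, not part of this change):

package sketch

import (
	provisioningv1alpha1 "github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// matchesExpected compares the fields these specs assert on: Type, Status,
// Reason and, when the expectation sets one, Message.
func matchesExpected(actual, expected metav1.Condition) bool {
	if actual.Type != expected.Type ||
		actual.Status != expected.Status ||
		actual.Reason != expected.Reason {
		return false
	}
	return expected.Message == "" || actual.Message == expected.Message
}

// Example expectation mirroring the assertion immediately above: the
// ClusterProvisioned condition has timed out. The identifiers come from the
// API package instead of string literals.
var expectedTimedOut = metav1.Condition{
	Type:   string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned),
	Status: metav1.ConditionFalse,
	Reason: string(provisioningv1alpha1.CRconditionReasons.TimedOut),
}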
verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.ClusterNotReady), + Reason: string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady), Message: "The Cluster is not yet ready", }) // Verify the start timestamp has been set for HardwareProvisioning @@ -1223,9 +1223,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(8)) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), }) // Verify the provisioningState moves to failed when cluster provisioning is failed verifyProvisioningStatus(reconciledCR.Status.ProvisioningStatus, @@ -1262,14 +1262,14 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "The configuration is still being applied", }) @@ -1316,14 +1316,14 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "The configuration is up to date", }) @@ -1370,20 +1370,20 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // is also up-to-date with the current status timeout. 
Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "nodes.0: hostName is required", }) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.ClusterNotReady), + Reason: string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady), Message: "The Cluster is not yet ready", }) // Verify the provisioningState remains progressing to reflect the on-going provisioning process @@ -1433,20 +1433,20 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // has changed to Completed. Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "nodes.0: hostName is required", }) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "The configuration is still being applied", }) @@ -1489,20 +1489,20 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // is also up-to-date with the current status timeout. 
Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[0], metav1.Condition{ - Type: string(utils.PRconditionTypes.Validated), + Type: string(provisioningv1alpha1.PRconditionTypes.Validated), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "nodes.0: hostName is required", }) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.TimedOut), + Reason: string(provisioningv1alpha1.CRconditionReasons.TimedOut), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.ClusterNotReady), + Reason: string(provisioningv1alpha1.CRconditionReasons.ClusterNotReady), Message: "The Cluster is not yet ready", }) // Verify the provisioningState has changed to failed as on-going provisioning process has reached to @@ -1554,20 +1554,20 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // has changed to Completed Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[1], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterInstanceRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterInstanceRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "spec.nodes[0].templateRefs must be provided", }) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "The configuration is up to date", }) @@ -1621,14 +1621,14 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[7], metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), }) verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "The configuration is up to date", }) @@ -1686,9 +1686,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(len(conditions)).To(Equal(9)) verifyStatusCondition(conditions[8], 
metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "The configuration is still being applied", }) @@ -1797,7 +1797,7 @@ var _ = Describe("ProvisioningRequestReconcile", func() { createNodeResources(ctx, c, nodePool.Name) provisionedCond := metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionFalse, } cr.Status.Conditions = append(cr.Status.Conditions, provisionedCond) @@ -1821,9 +1821,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { conditions := reconciledCR.Status.Conditions // Verify the ProvisioningRequest's status conditions verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "The configuration is still being applied", }) }) @@ -1834,7 +1834,7 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(c.Status().Update(ctx, policy)).To(Succeed()) // Complete the cluster provisioning. cr.Status.Conditions[0].Status = metav1.ConditionTrue - cr.Status.Conditions[0].Reason = string(utils.CRconditionReasons.Completed) + cr.Status.Conditions[0].Reason = string(provisioningv1alpha1.CRconditionReasons.Completed) Expect(c.Status().Update(ctx, cr)).To(Succeed()) // Start reconciliation. 
result, err := reconciler.Reconcile(ctx, req) @@ -1847,9 +1847,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { // Verify the ProvisioningRequest's status conditions conditions := reconciledCR.Status.Conditions verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "The configuration is up to date", }) }) @@ -1872,9 +1872,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { conditions := reconciledCR.Status.Conditions // Verify the ProvisioningRequest's status conditions verifyStatusCondition(conditions[8], metav1.Condition{ - Type: string(utils.PRconditionTypes.ConfigurationApplied), + Type: string(provisioningv1alpha1.PRconditionTypes.ConfigurationApplied), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "The configuration is still being applied", }) }) @@ -2174,9 +2174,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { createNodeResources(ctx, c, nodePool.Name) provisionedCond := metav1.Condition{ - Type: string(utils.PRconditionTypes.ClusterProvisioned), + Type: string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), } cr.Status.Conditions = append(cr.Status.Conditions, provisionedCond) cr.Status.Extensions.ClusterDetails = &provisioningv1alpha1.ClusterDetails{} @@ -2209,8 +2209,8 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Status: provisioningv1alpha1.ClusterTemplateStatus{ Conditions: []metav1.Condition{ { - Type: string(utils.CTconditionTypes.Validated), - Reason: string(utils.CTconditionReasons.Completed), + Type: string(provisioningv1alpha1.CTconditionTypes.Validated), + Reason: string(provisioningv1alpha1.CTconditionReasons.Completed), Status: metav1.ConditionTrue, }, }, @@ -2229,9 +2229,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed()) verifyStatusCondition(reconciledCR.Status.Conditions[9], metav1.Condition{ - Type: string(utils.PRconditionTypes.UpgradeCompleted), + Type: string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "Upgrade is in progress", }) @@ -2274,9 +2274,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed()) verifyStatusCondition(reconciledCR.Status.Conditions[9], metav1.Condition{ - Type: string(utils.PRconditionTypes.UpgradeCompleted), + Type: string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.InProgress), + Reason: string(provisioningv1alpha1.CRconditionReasons.InProgress), Message: "Upgrade is in progress", }) @@ -2324,9 +2324,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed()) verifyStatusCondition(reconciledCR.Status.Conditions[9], metav1.Condition{ - 
Type: string(utils.PRconditionTypes.UpgradeCompleted), + Type: string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted), Status: metav1.ConditionTrue, - Reason: string(utils.CRconditionReasons.Completed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Completed), Message: "Upgrade is completed", }) @@ -2380,9 +2380,9 @@ var _ = Describe("ProvisioningRequestReconcile", func() { Expect(c.Get(ctx, req.NamespacedName, reconciledCR)).To(Succeed()) verifyStatusCondition(reconciledCR.Status.Conditions[9], metav1.Condition{ - Type: string(utils.PRconditionTypes.UpgradeCompleted), + Type: string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "Upgrade Failed: Action Prep failed: pre-cache failed", }) diff --git a/internal/controllers/provisioningrequest_hwprovision.go b/internal/controllers/provisioningrequest_hwprovision.go index 8f7405b1..a6503c7d 100644 --- a/internal/controllers/provisioningrequest_hwprovision.go +++ b/internal/controllers/provisioningrequest_hwprovision.go @@ -134,15 +134,15 @@ func (t *provisioningRequestReconcilerTask) updateClusterInstance(ctx context.Co if configErr != nil { msg := "Failed to apply node configuration to the rendered ClusterInstance: " + configErr.Error() utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.HardwareNodeConfigApplied, - utils.CRconditionReasons.NotApplied, + provisioningv1alpha1.PRconditionTypes.HardwareNodeConfigApplied, + provisioningv1alpha1.CRconditionReasons.NotApplied, metav1.ConditionFalse, msg) utils.SetProvisioningStateFailed(t.object, msg) } else { utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.HardwareNodeConfigApplied, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.HardwareNodeConfigApplied, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "Node configuration has been applied to the rendered ClusterInstance") } @@ -333,7 +333,7 @@ func (t *provisioningRequestReconcilerTask) updateHardwareStatus( default: // Condition not found, set the status to unknown. status = metav1.ConditionUnknown - reason = string(utils.CRconditionReasons.Unknown) + reason = string(provisioningv1alpha1.CRconditionReasons.Unknown) message = "Unknown state of hardware provisioning" } @@ -355,15 +355,15 @@ func (t *provisioningRequestReconcilerTask) updateHardwareStatus( } } - conditionType := utils.PRconditionTypes.HardwareProvisioned + conditionType := provisioningv1alpha1.PRconditionTypes.HardwareProvisioned if condition == hwv1alpha1.Configured { - conditionType = utils.PRconditionTypes.HardwareConfigured + conditionType = provisioningv1alpha1.PRconditionTypes.HardwareConfigured } // Set the status condition for hardware status. 
utils.SetStatusCondition(&t.object.Status.Conditions, conditionType, - utils.ConditionReason(reason), + provisioningv1alpha1.ConditionReason(reason), status, message) @@ -452,8 +452,8 @@ func (t *provisioningRequestReconcilerTask) handleRenderHardwareTemplate(ctx con if err := t.checkExistingNodePool(ctx, clusterInstance, hwTemplate, nodePool); err != nil { if utils.IsInputError(err) { - updateErr := utils.UpdateHardwareTemplateStatusCondition(ctx, t.client, hwTemplate, utils.ConditionType(hwv1alpha1.Validation), - utils.ConditionReason(hwv1alpha1.Failed), metav1.ConditionFalse, err.Error()) + updateErr := utils.UpdateHardwareTemplateStatusCondition(ctx, t.client, hwTemplate, provisioningv1alpha1.ConditionType(hwv1alpha1.Validation), + provisioningv1alpha1.ConditionReason(hwv1alpha1.Failed), metav1.ConditionFalse, err.Error()) if updateErr != nil { // nolint: wrapcheck return nil, updateErr @@ -463,8 +463,8 @@ func (t *provisioningRequestReconcilerTask) handleRenderHardwareTemplate(ctx con } // The HardwareTemplate is validated by the CRD schema and no additional validation is needed - updateErr := utils.UpdateHardwareTemplateStatusCondition(ctx, t.client, hwTemplate, utils.ConditionType(hwv1alpha1.Validation), - utils.ConditionReason(hwv1alpha1.Completed), metav1.ConditionTrue, "Validated") + updateErr := utils.UpdateHardwareTemplateStatusCondition(ctx, t.client, hwTemplate, provisioningv1alpha1.ConditionType(hwv1alpha1.Validation), + provisioningv1alpha1.ConditionReason(hwv1alpha1.Completed), metav1.ConditionTrue, "Validated") if updateErr != nil { // nolint: wrapcheck return nil, updateErr diff --git a/internal/controllers/provisioningrequest_hwprovision_test.go b/internal/controllers/provisioningrequest_hwprovision_test.go index f7fb57f1..048d75fd 100644 --- a/internal/controllers/provisioningrequest_hwprovision_test.go +++ b/internal/controllers/provisioningrequest_hwprovision_test.go @@ -115,8 +115,8 @@ var _ = Describe("renderHardwareTemplate", func() { Status: provisioningv1alpha1.ClusterTemplateStatus{ Conditions: []metav1.Condition{ { - Type: string(utils.CTconditionTypes.Validated), - Reason: string(utils.CTconditionReasons.Completed), + Type: string(provisioningv1alpha1.CTconditionTypes.Validated), + Reason: string(provisioningv1alpha1.CTconditionReasons.Completed), Status: metav1.ConditionTrue, }, }, @@ -286,12 +286,12 @@ var _ = Describe("renderHardwareTemplate", func() { Message: "unallowed change detected", }) - cond := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareTemplateRendered)) + cond := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareTemplateRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "Failed to render the Hardware template", }) }) @@ -335,12 +335,12 @@ var _ = Describe("renderHardwareTemplate", func() { Message: "unallowed change detected", }) - cond := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareTemplateRendered)) + cond := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, 
metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareTemplateRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "Failed to render the Hardware template", }) }) @@ -390,12 +390,12 @@ var _ = Describe("renderHardwareTemplate", func() { Message: errMessage, }) - cond := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareTemplateRendered)) + cond := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered)) Expect(cond).ToNot(BeNil()) verifyStatusCondition(*cond, metav1.Condition{ - Type: string(utils.PRconditionTypes.HardwareTemplateRendered), + Type: string(provisioningv1alpha1.PRconditionTypes.HardwareTemplateRendered), Status: metav1.ConditionFalse, - Reason: string(utils.CRconditionReasons.Failed), + Reason: string(provisioningv1alpha1.CRconditionReasons.Failed), Message: "Failed to render the Hardware template", }) }) @@ -483,7 +483,7 @@ var _ = Describe("waitForNodePoolProvision", func() { Expect(provisioned).To(Equal(false)) Expect(timedOutOrFailed).To(Equal(true)) // It should be failed Expect(err).ToNot(HaveOccurred()) - condition := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareProvisioned)) + condition := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned)) Expect(condition).ToNot(BeNil()) Expect(condition.Status).To(Equal(metav1.ConditionFalse)) Expect(condition.Reason).To(Equal(string(hwv1alpha1.Failed))) @@ -513,7 +513,7 @@ var _ = Describe("waitForNodePoolProvision", func() { Expect(timedOutOrFailed).To(Equal(true)) // Now it should time out Expect(err).ToNot(HaveOccurred()) - condition := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareProvisioned)) + condition := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned)) Expect(condition).ToNot(BeNil()) Expect(condition.Status).To(Equal(metav1.ConditionFalse)) Expect(condition.Reason).To(Equal(string(hwv1alpha1.TimedOut))) @@ -531,7 +531,7 @@ var _ = Describe("waitForNodePoolProvision", func() { Expect(provisioned).To(Equal(false)) Expect(timedOutOrFailed).To(Equal(false)) Expect(err).ToNot(HaveOccurred()) - condition := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareProvisioned)) + condition := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned)) Expect(condition).ToNot(BeNil()) Expect(condition.Status).To(Equal(metav1.ConditionFalse)) }) @@ -547,7 +547,7 @@ var _ = Describe("waitForNodePoolProvision", func() { Expect(provisioned).To(Equal(true)) Expect(timedOutOrFailed).To(Equal(false)) Expect(err).ToNot(HaveOccurred()) - condition := meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareProvisioned)) + condition := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareProvisioned)) Expect(condition).ToNot(BeNil()) Expect(condition.Status).To(Equal(metav1.ConditionTrue)) }) @@ -583,7 +583,7 @@ var _ = Describe("waitForNodePoolProvision", func() { Expect(timedOutOrFailed).To(Equal(true)) // Now it should time out Expect(err).ToNot(HaveOccurred()) - condition := 
meta.FindStatusCondition(cr.Status.Conditions, string(utils.PRconditionTypes.HardwareConfigured)) + condition := meta.FindStatusCondition(cr.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.HardwareConfigured)) Expect(condition).ToNot(BeNil()) Expect(condition.Status).To(Equal(metav1.ConditionFalse)) Expect(condition.Reason).To(Equal(string(hwv1alpha1.TimedOut))) diff --git a/internal/controllers/provisioningrequest_upgrade.go b/internal/controllers/provisioningrequest_upgrade.go index 919bfc9e..f6369b3f 100644 --- a/internal/controllers/provisioningrequest_upgrade.go +++ b/internal/controllers/provisioningrequest_upgrade.go @@ -6,6 +6,7 @@ import ( "github.com/coreos/go-semver/semver" ibgu "github.com/openshift-kni/cluster-group-upgrades-operator/pkg/api/imagebasedgroupupgrades/v1alpha1" + provisioningv1alpha1 "github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1" "github.com/openshift-kni/oran-o2ims/internal/controllers/utils" siteconfig "github.com/stolostron/siteconfig/api/v1alpha1" "k8s.io/apimachinery/pkg/api/errors" @@ -89,8 +90,8 @@ func (t *provisioningRequestReconcilerTask) handleUpgrade( ) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.UpgradeCompleted, - utils.CRconditionReasons.InProgress, + provisioningv1alpha1.PRconditionTypes.UpgradeCompleted, + provisioningv1alpha1.CRconditionReasons.InProgress, metav1.ConditionFalse, "Upgrade is initiated", ) @@ -106,8 +107,8 @@ func (t *provisioningRequestReconcilerTask) handleUpgrade( if isIBGUProgressing(ibgu) { utils.SetProvisioningStateInProgress(t.object, "Cluster upgrade is in progress") utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.UpgradeCompleted, - utils.CRconditionReasons.InProgress, + provisioningv1alpha1.PRconditionTypes.UpgradeCompleted, + provisioningv1alpha1.CRconditionReasons.InProgress, metav1.ConditionFalse, "Upgrade is in progress", ) @@ -125,16 +126,16 @@ func (t *provisioningRequestReconcilerTask) handleUpgrade( if failed, message := isIBGUFailed(ibgu); failed { utils.SetProvisioningStateFailed(t.object, "Cluster upgrade is failed") utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.UpgradeCompleted, - utils.CRconditionReasons.Failed, + provisioningv1alpha1.PRconditionTypes.UpgradeCompleted, + provisioningv1alpha1.CRconditionReasons.Failed, metav1.ConditionFalse, message, ) } else { utils.SetProvisioningStateFulfilled(t.object) utils.SetStatusCondition(&t.object.Status.Conditions, - utils.PRconditionTypes.UpgradeCompleted, - utils.CRconditionReasons.Completed, + provisioningv1alpha1.PRconditionTypes.UpgradeCompleted, + provisioningv1alpha1.CRconditionReasons.Completed, metav1.ConditionTrue, "Upgrade is completed", ) @@ -158,7 +159,7 @@ func (t *provisioningRequestReconcilerTask) handleUpgrade( if err != nil { return requeueWithError(fmt.Errorf("failed to cleanup IBGU: %w", err)) } - meta.RemoveStatusCondition(&t.object.Status.Conditions, string(utils.PRconditionTypes.UpgradeCompleted)) + meta.RemoveStatusCondition(&t.object.Status.Conditions, string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted)) if err := utils.UpdateK8sCRStatus(ctx, t.client, t.object); err != nil { return requeueWithError(fmt.Errorf("failed to update ClusterRequest CR status: %w", err)) } diff --git a/internal/controllers/utils/conditions.go b/internal/controllers/utils/conditions.go index 84dc7bf1..e10bc1dd 100644 --- a/internal/controllers/utils/conditions.go +++ b/internal/controllers/utils/conditions.go @@ -8,80 +8,14 
@@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ConditionType is a string representing the condition's type -type ConditionType string - -// The following constants define the different types of conditions that will be set for ClusterTemplate -var CTconditionTypes = struct { - Validated ConditionType -}{ - Validated: "ClusterTemplateValidated", -} - -// The following constants define the different types of conditions that will be set for ProvisioningRequest -var PRconditionTypes = struct { - Validated ConditionType - HardwareTemplateRendered ConditionType - HardwareProvisioned ConditionType - HardwareNodeConfigApplied ConditionType - HardwareConfigured ConditionType - ClusterInstanceRendered ConditionType - ClusterResourcesCreated ConditionType - ClusterInstanceProcessed ConditionType - ClusterProvisioned ConditionType - ConfigurationApplied ConditionType - UpgradeCompleted ConditionType -}{ - Validated: "ProvisioningRequestValidated", - HardwareTemplateRendered: "HardwareTemplateRendered", - HardwareProvisioned: "HardwareProvisioned", - HardwareNodeConfigApplied: "HardwareNodeConfigApplied", - HardwareConfigured: "HardwareConfigured", - ClusterInstanceRendered: "ClusterInstanceRendered", - ClusterResourcesCreated: "ClusterResourcesCreated", - ClusterInstanceProcessed: "ClusterInstanceProcessed", - ClusterProvisioned: "ClusterProvisioned", - ConfigurationApplied: "ConfigurationApplied", - UpgradeCompleted: "UpgradeCompleted", -} - -// ConditionReason is a string representing the condition's reason -type ConditionReason string - -// The following constants define the different reasons that conditions will be set for ClusterTemplate -var CTconditionReasons = struct { - Completed ConditionReason - Failed ConditionReason -}{ - Completed: "Completed", - Failed: "Failed", -} - -// The following constants define the different reasons that conditions will be set for ProvisioningRequest -var CRconditionReasons = struct { - NotApplied ConditionReason - ClusterNotReady ConditionReason - Completed ConditionReason - Failed ConditionReason - InProgress ConditionReason - Missing ConditionReason - OutOfDate ConditionReason - TimedOut ConditionReason - Unknown ConditionReason -}{ - NotApplied: "NotApplied", - ClusterNotReady: "ClusterNotReady", - Completed: "Completed", - Failed: "Failed", - InProgress: "InProgress", - Missing: "Missing", - OutOfDate: "OutOfDate", - TimedOut: "TimedOut", - Unknown: "Unknown", -} - // SetStatusCondition is a convenience wrapper for meta.SetStatusCondition that takes in the types defined here and converts them to strings -func SetStatusCondition(existingConditions *[]metav1.Condition, conditionType ConditionType, conditionReason ConditionReason, conditionStatus metav1.ConditionStatus, message string) { +func SetStatusCondition( + existingConditions *[]metav1.Condition, + conditionType provisioningv1alpha1.ConditionType, + conditionReason provisioningv1alpha1.ConditionReason, + conditionStatus metav1.ConditionStatus, + message string, +) { meta.SetStatusCondition( existingConditions, metav1.Condition{ @@ -125,7 +59,8 @@ func IsProvisioningStateFulfilled(cr *provisioningv1alpha1.ProvisioningRequest) // IsClusterProvisionPresent checks if the cluster provision condition is present func IsClusterProvisionPresent(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.ClusterProvisioned))) + condition := meta.FindStatusCondition(cr.Status.Conditions, + 
string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) return condition != nil } @@ -136,17 +71,21 @@ func IsClusterProvisionPresent(cr *provisioningv1alpha1.ProvisioningRequest) boo // but the status wasn't updated correctly. Therefore, we treat it as completed so that the provisioningStatus // be updated properly. This workaround can be removed after ACM 2.12 GA. func IsClusterProvisionCompleted(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.ClusterProvisioned))) - return condition != nil && (condition.Status == metav1.ConditionTrue || condition.Reason == string(siteconfigv1alpha1.StaleConditions)) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) + return condition != nil && + (condition.Status == metav1.ConditionTrue || + condition.Reason == string(siteconfigv1alpha1.StaleConditions)) } // IsClusterProvisionTimedOutOrFailed checks if the cluster provision condition status is timedout or failed func IsClusterProvisionTimedOutOrFailed(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.ClusterProvisioned))) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) if condition != nil { if condition.Status == metav1.ConditionFalse && - (condition.Reason == string(CRconditionReasons.Failed) || - condition.Reason == string(CRconditionReasons.TimedOut)) { + (condition.Reason == string(provisioningv1alpha1.CRconditionReasons.Failed) || + condition.Reason == string(provisioningv1alpha1.CRconditionReasons.TimedOut)) { return true } } @@ -155,8 +94,9 @@ func IsClusterProvisionTimedOutOrFailed(cr *provisioningv1alpha1.ProvisioningReq // IsClusterProvisionFailed checks if the cluster provision condition status is failed func IsClusterProvisionFailed(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.ClusterProvisioned))) - return condition != nil && condition.Reason == string(CRconditionReasons.Failed) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned)) + return condition != nil && condition.Reason == string(provisioningv1alpha1.CRconditionReasons.Failed) } // IsSmoRegistrationCompleted checks if registration with SMO has been completed @@ -168,9 +108,11 @@ func IsSmoRegistrationCompleted(cr *inventoryv1alpha1.Inventory) bool { // IsClusterUpgradeInProgress checks if the cluster upgrade condition status is in progress func IsClusterUpgradeInProgress(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.UpgradeCompleted))) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted)) if condition != nil { - if condition.Status == metav1.ConditionFalse && condition.Reason == string(CRconditionReasons.InProgress) { + if condition.Status == metav1.ConditionFalse && + condition.Reason == string(provisioningv1alpha1.CRconditionReasons.InProgress) { return true } } @@ -179,7 +121,8 @@ func IsClusterUpgradeInProgress(cr *provisioningv1alpha1.ProvisioningRequest) bo // IsClusterUpgradeCompleted checks if the cluster upgrade is completed func 
IsClusterUpgradeCompleted(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.UpgradeCompleted))) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted)) if condition != nil { if condition.Status == metav1.ConditionTrue { return true @@ -190,6 +133,7 @@ func IsClusterUpgradeCompleted(cr *provisioningv1alpha1.ProvisioningRequest) boo // IsClusterUpgradeInitiated checks if the cluster upgrade is initiated func IsClusterUpgradeInitiated(cr *provisioningv1alpha1.ProvisioningRequest) bool { - condition := meta.FindStatusCondition(cr.Status.Conditions, (string(PRconditionTypes.UpgradeCompleted))) + condition := meta.FindStatusCondition(cr.Status.Conditions, + string(provisioningv1alpha1.PRconditionTypes.UpgradeCompleted)) return condition != nil } diff --git a/internal/controllers/utils/hardware_utils.go b/internal/controllers/utils/hardware_utils.go index adc4034e..a56449aa 100644 --- a/internal/controllers/utils/hardware_utils.go +++ b/internal/controllers/utils/hardware_utils.go @@ -10,6 +10,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" hwv1alpha1 "github.com/openshift-kni/oran-o2ims/api/hardwaremanagement/v1alpha1" + provisioningv1alpha1 "github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1" siteconfig "github.com/stolostron/siteconfig/api/v1alpha1" corev1 "k8s.io/api/core/v1" @@ -439,7 +440,8 @@ func SetNodePoolLabels(nodePool *hwv1alpha1.NodePool, label, value string) { // UpdateHardwareTemplateStatusCondition updates the status condition of the HardwareTemplate resource func UpdateHardwareTemplateStatusCondition(ctx context.Context, c client.Client, hardwareTemplate *hwv1alpha1.HardwareTemplate, - conditionType ConditionType, conditionReason ConditionReason, conditionStatus metav1.ConditionStatus, message string) error { + conditionType provisioningv1alpha1.ConditionType, conditionReason provisioningv1alpha1.ConditionReason, + conditionStatus metav1.ConditionStatus, message string) error { SetStatusCondition(&hardwareTemplate.Status.Conditions, conditionType, @@ -469,8 +471,8 @@ func GetTimeoutFromHWTemplate(ctx context.Context, c client.Client, name string) if err != nil { errMessage := fmt.Sprintf("the value of HardwareProvisioningTimeout from hardware template %s is not a valid duration string: %v", name, err) - updateErr := UpdateHardwareTemplateStatusCondition(ctx, c, hwTemplate, ConditionType(hwv1alpha1.Validation), - ConditionReason(hwv1alpha1.Failed), metav1.ConditionFalse, errMessage) + updateErr := UpdateHardwareTemplateStatusCondition(ctx, c, hwTemplate, provisioningv1alpha1.ConditionType(hwv1alpha1.Validation), + provisioningv1alpha1.ConditionReason(hwv1alpha1.Failed), metav1.ConditionFalse, errMessage) if updateErr != nil { // nolint: wrapcheck return 0, updateErr diff --git a/internal/controllers/utils/provision_test.go b/internal/controllers/utils/provision_test.go index 763f5c6f..99fea2e8 100644 --- a/internal/controllers/utils/provision_test.go +++ b/internal/controllers/utils/provision_test.go @@ -592,19 +592,19 @@ var _ = Describe("ClusterIsReadyForPolicyConfig", func() { Expect(err).ToNot(HaveOccurred()) Expect(managedClusterExists).To(BeTrue()) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionFalse, "Managed 
cluster is available", ) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionHubAccepted), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionHubAccepted), "HubClusterAdminAccepted", metav1.ConditionTrue, "Accepted by hub cluster admin", ) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionJoined), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionJoined), "ManagedClusterJoined", metav1.ConditionTrue, "Managed cluster joined", @@ -625,19 +625,19 @@ var _ = Describe("ClusterIsReadyForPolicyConfig", func() { Expect(err).ToNot(HaveOccurred()) Expect(managedClusterExists).To(BeTrue()) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionAvailable), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionAvailable), "ManagedClusterAvailable", metav1.ConditionTrue, "Managed cluster is available", ) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionHubAccepted), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionHubAccepted), "HubClusterAdminAccepted", metav1.ConditionTrue, "Accepted by hub cluster admin", ) SetStatusCondition(&managedCluster1.Status.Conditions, - ConditionType(clusterv1.ManagedClusterConditionJoined), + provisioningv1alpha1.ConditionType(clusterv1.ManagedClusterConditionJoined), "ManagedClusterJoined", metav1.ConditionTrue, "Managed cluster joined", diff --git a/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/conditions.go b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/conditions.go new file mode 100644 index 00000000..e9f89a13 --- /dev/null +++ b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/conditions.go @@ -0,0 +1,73 @@ +package v1alpha1 + +// ConditionType is a string representing the condition's type +type ConditionType string + +// The following constants define the different types of conditions that will be set for ClusterTemplate +var CTconditionTypes = struct { + Validated ConditionType +}{ + Validated: "ClusterTemplateValidated", +} + +// The following constants define the different types of conditions that will be set for ProvisioningRequest +var PRconditionTypes = struct { + Validated ConditionType + HardwareTemplateRendered ConditionType + HardwareProvisioned ConditionType + HardwareNodeConfigApplied ConditionType + HardwareConfigured ConditionType + ClusterInstanceRendered ConditionType + ClusterResourcesCreated ConditionType + ClusterInstanceProcessed ConditionType + ClusterProvisioned ConditionType + ConfigurationApplied ConditionType + UpgradeCompleted ConditionType +}{ + Validated: "ProvisioningRequestValidated", + HardwareTemplateRendered: "HardwareTemplateRendered", + HardwareProvisioned: "HardwareProvisioned", + HardwareNodeConfigApplied: "HardwareNodeConfigApplied", + HardwareConfigured: "HardwareConfigured", + ClusterInstanceRendered: "ClusterInstanceRendered", + ClusterResourcesCreated: "ClusterResourcesCreated", + ClusterInstanceProcessed: "ClusterInstanceProcessed", + ClusterProvisioned: "ClusterProvisioned", + ConfigurationApplied: "ConfigurationApplied", + UpgradeCompleted: "UpgradeCompleted", +} + +// ConditionReason is a string representing the condition's reason +type ConditionReason string + +// The following constants define the different reasons that conditions will be set for ClusterTemplate +var 
CTconditionReasons = struct { + Completed ConditionReason + Failed ConditionReason +}{ + Completed: "Completed", + Failed: "Failed", +} + +// The following constants define the different reasons that conditions will be set for ProvisioningRequest +var CRconditionReasons = struct { + NotApplied ConditionReason + ClusterNotReady ConditionReason + Completed ConditionReason + Failed ConditionReason + InProgress ConditionReason + Missing ConditionReason + OutOfDate ConditionReason + TimedOut ConditionReason + Unknown ConditionReason +}{ + NotApplied: "NotApplied", + ClusterNotReady: "ClusterNotReady", + Completed: "Completed", + Failed: "Failed", + InProgress: "InProgress", + Missing: "Missing", + OutOfDate: "OutOfDate", + TimedOut: "TimedOut", + Unknown: "Unknown", +} diff --git a/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_validation.go b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_validation.go index 691267d6..4b914ec6 100644 --- a/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_validation.go +++ b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_validation.go @@ -244,7 +244,7 @@ func (r *ProvisioningRequest) GetClusterTemplateRef(ctx context.Context, client if ct.Name == clusterTemplateRefName { validatedCond := meta.FindStatusCondition( ct.Status.Conditions, - "ClusterTemplateValidated") // TODO: consider exposing the conditions in the API + string(CTconditionTypes.Validated)) if validatedCond != nil && validatedCond.Status == metav1.ConditionTrue { return &ct, nil } diff --git a/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_webhook.go b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_webhook.go index dd489ef1..c086b46e 100644 --- a/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_webhook.go +++ b/vendor/github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1/provisioningrequest_webhook.go @@ -122,9 +122,9 @@ func (r *ProvisioningRequest) validateCreateOrUpdate(oldPr *ProvisioningRequest) // Once provisioning has started or reached a final state (Completed or Failed), // updates to immutable fields in the ClusterInstance input are disallowed, // with the exception of scaling up/down when Cluster provisioning is completed. - // TODO: consider exposing the conditions in the API. - crProvisionedCond := meta.FindStatusCondition(r.Status.Conditions, "ClusterProvisioned") - if crProvisionedCond != nil && crProvisionedCond.Reason != "Unknown" { + crProvisionedCond := meta.FindStatusCondition( + r.Status.Conditions, string(PRconditionTypes.ClusterProvisioned)) + if crProvisionedCond != nil && crProvisionedCond.Reason != string(CRconditionReasons.Unknown) { oldPrClusterInstanceInput, err := ExtractMatchingInput( oldPr.Spec.TemplateParameters.Raw, TemplateParamClusterInstance) if err != nil {
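Because the condition vocabulary now lives in the public provisioning API package (and its vendored copy), code outside internal/controllers can reference the same typed constants that the webhook hunk above uses, instead of hard-coded strings. A small, self-contained sketch of such a consumer; the helper name and the standalone program are illustrative and not part of this change:

package main

import (
	"fmt"

	provisioningv1alpha1 "github.com/openshift-kni/oran-o2ims/api/provisioning/v1alpha1"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// clusterProvisionCompleted reports whether the ClusterProvisioned condition is
// true with the Completed reason, using the relocated typed constants rather
// than the literal strings that were previously hard-coded at call sites.
func clusterProvisionCompleted(conditions []metav1.Condition) bool {
	cond := meta.FindStatusCondition(conditions,
		string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned))
	return cond != nil &&
		cond.Status == metav1.ConditionTrue &&
		cond.Reason == string(provisioningv1alpha1.CRconditionReasons.Completed)
}

func main() {
	// Record a condition the way the controller's SetStatusCondition wrapper
	// ultimately does: via meta.SetStatusCondition with the typed constants
	// converted to strings.
	var conditions []metav1.Condition
	meta.SetStatusCondition(&conditions, metav1.Condition{
		Type:    string(provisioningv1alpha1.PRconditionTypes.ClusterProvisioned),
		Status:  metav1.ConditionTrue,
		Reason:  string(provisioningv1alpha1.CRconditionReasons.Completed),
		Message: "Provisioning completed",
	})
	fmt.Println(clusterProvisionCompleted(conditions)) // true
}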