From 9a2e195d89636316554f8e13e37cb2f5a278b11e Mon Sep 17 00:00:00 2001 From: enxebre Date: Mon, 13 Jan 2025 14:59:50 +0100 Subject: [PATCH] Relax cel and fix tests to satisfy new labels constraints --- hack/validation/labels.sh | 2 -- hack/validation/requirements.sh | 4 ---- kwok/charts/crds/karpenter.sh_nodeclaims.yaml | 4 ---- kwok/charts/crds/karpenter.sh_nodepools.yaml | 8 ------- pkg/apis/crds/karpenter.sh_nodeclaims.yaml | 4 ---- pkg/apis/crds/karpenter.sh_nodepools.yaml | 8 ------- pkg/apis/v1/nodeclaim_validation_cel_test.go | 8 +++---- pkg/apis/v1/nodepool_validation_cel_test.go | 22 ++++++++--------- .../nodeclaim/lifecycle/termination_test.go | 9 +++---- .../provisioning/scheduling/suite_test.go | 24 +++++++++---------- pkg/controllers/provisioning/suite_test.go | 8 +++---- 11 files changed, 36 insertions(+), 65 deletions(-) diff --git a/hack/validation/labels.sh b/hack/validation/labels.sh index 8debb6323b..c2b76cbfef 100755 --- a/hack/validation/labels.sh +++ b/hack/validation/labels.sh @@ -4,8 +4,6 @@ # checking for restricted labels while filtering out well-known labels yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.maxProperties = 100' -i pkg/apis/crds/karpenter.sh_nodepools.yaml yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.metadata.properties.labels.x-kubernetes-validations += [ - {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self.all(x, x in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || x.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || x.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || 
!x.find(\"^([^/]+)\").endsWith(\"kubernetes.io\"))"}, - {"message": "label domain \"k8s.io\" is restricted", "rule": "self.all(x, x.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !x.find(\"^([^/]+)\").endsWith(\"k8s.io\"))"}, {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self.all(x, x in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !x.find(\"^([^/]+)\").endsWith(\"karpenter.sh\"))"}, {"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self.all(x, x != \"karpenter.sh/nodepool\")"}, {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self.all(x, x != \"kubernetes.io/hostname\")"}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml diff --git a/hack/validation/requirements.sh b/hack/validation/requirements.sh index 43c53d834f..530f5f0969 100755 --- a/hack/validation/requirements.sh +++ b/hack/validation/requirements.sh @@ -5,8 +5,6 @@ yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.req yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml ## checking for restricted labels while filtering out well-known labels yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [ - {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || 
self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || !self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"}, - {"message": "label domain \"k8s.io\" is restricted", "rule": "self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"}, {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"}, {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != \"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodeclaims.yaml ## operator enum values @@ -21,8 +19,6 @@ yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.tem yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.pattern = "^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$"' -i pkg/apis/crds/karpenter.sh_nodepools.yaml ## checking for restricted labels while filtering out well-known labels yq eval '.spec.versions[0].schema.openAPIV3Schema.properties.spec.properties.template.properties.spec.properties.requirements.items.properties.key.x-kubernetes-validations += [ - {"message": "label domain \"kubernetes.io\" is restricted", "rule": "self in [\"beta.kubernetes.io/instance-type\", \"failure-domain.beta.kubernetes.io/region\", \"beta.kubernetes.io/os\", \"beta.kubernetes.io/arch\", \"failure-domain.beta.kubernetes.io/zone\", \"topology.kubernetes.io/zone\", \"topology.kubernetes.io/region\", \"node.kubernetes.io/instance-type\", \"kubernetes.io/arch\", \"kubernetes.io/os\", \"node.kubernetes.io/windows-build\"] || self.find(\"^([^/]+)\").endsWith(\"node.kubernetes.io\") || self.find(\"^([^/]+)\").endsWith(\"node-restriction.kubernetes.io\") || 
!self.find(\"^([^/]+)\").endsWith(\"kubernetes.io\")"}, - {"message": "label domain \"k8s.io\" is restricted", "rule": "self.find(\"^([^/]+)\").endsWith(\"kops.k8s.io\") || !self.find(\"^([^/]+)\").endsWith(\"k8s.io\")"}, {"message": "label domain \"karpenter.sh\" is restricted", "rule": "self in [\"karpenter.sh/capacity-type\", \"karpenter.sh/nodepool\"] || !self.find(\"^([^/]+)\").endsWith(\"karpenter.sh\")"}, {"message": "label \"karpenter.sh/nodepool\" is restricted", "rule": "self != \"karpenter.sh/nodepool\""}, {"message": "label \"kubernetes.io/hostname\" is restricted", "rule": "self != \"kubernetes.io/hostname\""}]' -i pkg/apis/crds/karpenter.sh_nodepools.yaml diff --git a/kwok/charts/crds/karpenter.sh_nodeclaims.yaml b/kwok/charts/crds/karpenter.sh_nodeclaims.yaml index 1150d87ca5..1b052e2e9c 100644 --- a/kwok/charts/crds/karpenter.sh_nodeclaims.yaml +++ b/kwok/charts/crds/karpenter.sh_nodeclaims.yaml @@ -125,10 +125,6 @@ spec: maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") - - message: label domain "k8s.io" is restricted - rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") - message: label domain "karpenter.sh" is restricted rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || 
!self.find("^([^/]+)").endsWith("karpenter.sh") - message: label "kubernetes.io/hostname" is restricted diff --git a/kwok/charts/crds/karpenter.sh_nodepools.yaml b/kwok/charts/crds/karpenter.sh_nodepools.yaml index 84ba89082f..0db73980d3 100644 --- a/kwok/charts/crds/karpenter.sh_nodepools.yaml +++ b/kwok/charts/crds/karpenter.sh_nodepools.yaml @@ -196,10 +196,6 @@ spec: type: object maxProperties: 100 x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self.all(x, x in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || x.find("^([^/]+)").endsWith("node.kubernetes.io") || x.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !x.find("^([^/]+)").endsWith("kubernetes.io")) - - message: label domain "k8s.io" is restricted - rule: self.all(x, x.find("^([^/]+)").endsWith("kops.k8s.io") || !x.find("^([^/]+)").endsWith("k8s.io")) - message: label domain "karpenter.sh" is restricted rule: self.all(x, x in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !x.find("^([^/]+)").endsWith("karpenter.sh")) - message: label "karpenter.sh/nodepool" is restricted @@ -267,10 +263,6 @@ spec: maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", 
"node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") - - message: label domain "k8s.io" is restricted - rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") - message: label domain "karpenter.sh" is restricted rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !self.find("^([^/]+)").endsWith("karpenter.sh") - message: label "karpenter.sh/nodepool" is restricted diff --git a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml index 9465ed5b53..aed6486392 100644 --- a/pkg/apis/crds/karpenter.sh_nodeclaims.yaml +++ b/pkg/apis/crds/karpenter.sh_nodeclaims.yaml @@ -125,10 +125,6 @@ spec: maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") - - message: label domain "k8s.io" is restricted - rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") - message: label domain "karpenter.sh" is restricted rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !self.find("^([^/]+)").endsWith("karpenter.sh") - message: label "kubernetes.io/hostname" is restricted diff 
--git a/pkg/apis/crds/karpenter.sh_nodepools.yaml b/pkg/apis/crds/karpenter.sh_nodepools.yaml index 67d7d2d728..ee9b1ea9e7 100644 --- a/pkg/apis/crds/karpenter.sh_nodepools.yaml +++ b/pkg/apis/crds/karpenter.sh_nodepools.yaml @@ -196,10 +196,6 @@ spec: type: object maxProperties: 100 x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self.all(x, x in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || x.find("^([^/]+)").endsWith("node.kubernetes.io") || x.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !x.find("^([^/]+)").endsWith("kubernetes.io")) - - message: label domain "k8s.io" is restricted - rule: self.all(x, x.find("^([^/]+)").endsWith("kops.k8s.io") || !x.find("^([^/]+)").endsWith("k8s.io")) - message: label domain "karpenter.sh" is restricted rule: self.all(x, x in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !x.find("^([^/]+)").endsWith("karpenter.sh")) - message: label "karpenter.sh/nodepool" is restricted @@ -267,10 +263,6 @@ spec: maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*(\/))?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ x-kubernetes-validations: - - message: label domain "kubernetes.io" is restricted - rule: self in ["beta.kubernetes.io/instance-type", "failure-domain.beta.kubernetes.io/region", "beta.kubernetes.io/os", "beta.kubernetes.io/arch", "failure-domain.beta.kubernetes.io/zone", "topology.kubernetes.io/zone", "topology.kubernetes.io/region", "node.kubernetes.io/instance-type", "kubernetes.io/arch", "kubernetes.io/os", "node.kubernetes.io/windows-build"] || self.find("^([^/]+)").endsWith("node.kubernetes.io") || 
self.find("^([^/]+)").endsWith("node-restriction.kubernetes.io") || !self.find("^([^/]+)").endsWith("kubernetes.io") - - message: label domain "k8s.io" is restricted - rule: self.find("^([^/]+)").endsWith("kops.k8s.io") || !self.find("^([^/]+)").endsWith("k8s.io") - message: label domain "karpenter.sh" is restricted rule: self in ["karpenter.sh/capacity-type", "karpenter.sh/nodepool"] || !self.find("^([^/]+)").endsWith("karpenter.sh") - message: label "karpenter.sh/nodepool" is restricted diff --git a/pkg/apis/v1/nodeclaim_validation_cel_test.go b/pkg/apis/v1/nodeclaim_validation_cel_test.go index c0d3e59b72..b6e1b04b5a 100644 --- a/pkg/apis/v1/nodeclaim_validation_cel_test.go +++ b/pkg/apis/v1/nodeclaim_validation_cel_test.go @@ -141,9 +141,9 @@ var _ = Describe("Validation", func() { Expect(env.Client.Create(ctx, nodeClaim)).ToNot(Succeed()) } }) - It("should allow restricted domains exceptions", func() { + It("should allow kubernetes domains", func() { oldNodeClaim := nodeClaim.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, } @@ -152,9 +152,9 @@ var _ = Describe("Validation", func() { nodeClaim = oldNodeClaim.DeepCopy() } }) - It("should allow restricted subdomains exceptions", func() { + It("should allow kubernetes subdomains", func() { oldNodeClaim := nodeClaim.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodeClaim.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "subdomain." 
+ label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, } diff --git a/pkg/apis/v1/nodepool_validation_cel_test.go b/pkg/apis/v1/nodepool_validation_cel_test.go index a6754ea182..df20dc076e 100644 --- a/pkg/apis/v1/nodepool_validation_cel_test.go +++ b/pkg/apis/v1/nodepool_validation_cel_test.go @@ -412,9 +412,9 @@ var _ = Describe("CEL/Validation", func() { Expect(nodePool.RuntimeValidate()).ToNot(Succeed()) } }) - It("should allow restricted domains exceptions", func() { + It("should allow kubernetes domains", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, } @@ -424,9 +424,9 @@ var _ = Describe("CEL/Validation", func() { nodePool = oldNodePool.DeepCopy() } }) - It("should allow restricted subdomains exceptions", func() { + It("should allow kubernetes subdomains", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodePool.Spec.Template.Spec.Requirements = []NodeSelectorRequirementWithMinValues{ {NodeSelectorRequirement: v1.NodeSelectorRequirement{Key: "subdomain." 
+ label + "/test", Operator: v1.NodeSelectorOpIn, Values: []string{"test"}}}, } @@ -560,9 +560,9 @@ var _ = Describe("CEL/Validation", func() { Expect(env.Client.Create(ctx, nodePool)).To(Succeed()) Expect(nodePool.RuntimeValidate()).To(Succeed()) }) - It("should allow labels in restricted domains exceptions list", func() { + It("should allow labels in kubernetes domains", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { fmt.Println(label) nodePool.Spec.Template.Labels = map[string]string{ label: "test-value", @@ -573,9 +573,9 @@ var _ = Describe("CEL/Validation", func() { nodePool = oldNodePool.DeepCopy() } }) - It("should allow labels prefixed with the restricted domain exceptions", func() { + It("should allow labels prefixed with the kubernetes domain", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodePool.Spec.Template.Labels = map[string]string{ fmt.Sprintf("%s/key", label): "test-value", } @@ -587,7 +587,7 @@ var _ = Describe("CEL/Validation", func() { }) It("should allow subdomain labels in restricted domains exceptions list", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodePool.Spec.Template.Labels = map[string]string{ fmt.Sprintf("subdomain.%s", label): "test-value", } @@ -597,9 +597,9 @@ var _ = Describe("CEL/Validation", func() { nodePool = oldNodePool.DeepCopy() } }) - It("should allow subdomain labels prefixed with the restricted domain exceptions", func() { + It("should allow subdomain labels prefixed with the kubernetes domain", func() { oldNodePool := nodePool.DeepCopy() - for label := range LabelDomainExceptions { + for label := range K8sLabelDomains { nodePool.Spec.Template.Labels = map[string]string{ fmt.Sprintf("subdomain.%s/key", label): "test-value", } diff --git 
a/pkg/controllers/nodeclaim/lifecycle/termination_test.go b/pkg/controllers/nodeclaim/lifecycle/termination_test.go index 6d2d4d9238..59bf1b58f4 100644 --- a/pkg/controllers/nodeclaim/lifecycle/termination_test.go +++ b/pkg/controllers/nodeclaim/lifecycle/termination_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/karpenter/pkg/apis" v1 "sigs.k8s.io/karpenter/pkg/apis/v1" "sigs.k8s.io/karpenter/pkg/cloudprovider" "sigs.k8s.io/karpenter/pkg/cloudprovider/fake" @@ -300,7 +301,8 @@ var _ = Describe("Termination", func() { ExpectExists(ctx, env.Client, node) nodeClaim = ExpectExists(ctx, env.Client, nodeClaim) - Expect(nodeClaim.ObjectMeta.Annotations).To(BeNil()) + Expect(nodeClaim.ObjectMeta.Annotations).To(HaveLen(1)) + Expect(nodeClaim.ObjectMeta.Annotations).To(HaveKey(apis.Group + "/node-restricted-labels")) }) It("should annotate the node if the NodeClaim has a terminationGracePeriod", func() { nodeClaim.Spec.TerminationGracePeriod = &metav1.Duration{Duration: time.Second * 300} @@ -348,9 +350,8 @@ var _ = Describe("Termination", func() { ExpectExists(ctx, env.Client, node) nodeClaim = ExpectExists(ctx, env.Client, nodeClaim) - Expect(nodeClaim.ObjectMeta.Annotations).To(Equal(map[string]string{ - v1.NodeClaimTerminationTimestampAnnotationKey: "2024-04-01T12:00:00-05:00", - })) + Expect(nodeClaim.ObjectMeta.Annotations).To(HaveKeyWithValue( + v1.NodeClaimTerminationTimestampAnnotationKey, "2024-04-01T12:00:00-05:00")) }) It("should not delete Nodes if the NodeClaim is not registered", func() { ExpectApplied(ctx, env.Client, nodePool, nodeClaim) diff --git a/pkg/controllers/provisioning/scheduling/suite_test.go b/pkg/controllers/provisioning/scheduling/suite_test.go index aca19883e7..6cca5d9570 100644 --- a/pkg/controllers/provisioning/scheduling/suite_test.go +++ b/pkg/controllers/provisioning/scheduling/suite_test.go @@ -416,28 +416,28 @@ var _ = Context("Scheduling", func() { 
ExpectNotScheduled(ctx, env.Client, pod) } }) - It("should schedule pods that have node selectors with label in restricted domains exceptions list", func() { + It("should schedule pods that have node selectors with label in the kubernetes domains", func() { var requirements []v1.NodeSelectorRequirementWithMinValues - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { requirements = append(requirements, v1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: domain + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value"}}}) } nodePool.Spec.Template.Spec.Requirements = requirements ExpectApplied(ctx, env.Client, nodePool) - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { pod := test.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value")) } }) - It("should schedule pods that have node selectors with label in subdomain from restricted domains exceptions list", func() { + It("should schedule pods that have node selectors with label in subdomain from kubernetes domains", func() { var requirements []v1.NodeSelectorRequirementWithMinValues - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { requirements = append(requirements, v1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: "subdomain." 
+ domain + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value"}}}) } nodePool.Spec.Template.Spec.Requirements = requirements ExpectApplied(ctx, env.Client, nodePool) - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { pod := test.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) @@ -867,28 +867,28 @@ var _ = Context("Scheduling", func() { ExpectNotScheduled(ctx, env.Client, pod) } }) - It("should schedule pods that have node selectors with label in restricted domains exceptions list", func() { + It("should schedule pods that have node selectors with label in kubernetes domains", func() { var requirements []v1.NodeSelectorRequirementWithMinValues - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { requirements = append(requirements, v1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: domain + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value"}}}) } nodePool.Spec.Template.Spec.Requirements = requirements ExpectApplied(ctx, env.Client, nodePool) - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { pod := test.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value")) } }) - It("should schedule pods that have node selectors with label in subdomain from restricted domains exceptions list", func() { + It("should schedule pods that have node selectors with label in subdomain from kubernetes domains", func() { var requirements []v1.NodeSelectorRequirementWithMinValues - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { requirements = append(requirements, 
v1.NodeSelectorRequirementWithMinValues{NodeSelectorRequirement: corev1.NodeSelectorRequirement{Key: "subdomain." + domain + "/test", Operator: corev1.NodeSelectorOpIn, Values: []string{"test-value"}}}) } nodePool.Spec.Template.Spec.Requirements = requirements ExpectApplied(ctx, env.Client, nodePool) - for domain := range v1.LabelDomainExceptions { + for domain := range v1.K8sLabelDomains { pod := test.UnschedulablePod() ExpectProvisioned(ctx, env.Client, cluster, cloudProvider, prov, pod) node := ExpectScheduled(ctx, env.Client, pod) diff --git a/pkg/controllers/provisioning/suite_test.go b/pkg/controllers/provisioning/suite_test.go index 56f1a21dd8..90ae353a50 100644 --- a/pkg/controllers/provisioning/suite_test.go +++ b/pkg/controllers/provisioning/suite_test.go @@ -1236,8 +1236,8 @@ var _ = Describe("Provisioning", func() { Expect(node.Labels).To(HaveKey("test-key-6")) Expect(node.Labels).ToNot(HaveKey("test-key-7")) }) - It("should label nodes with labels in the LabelDomainExceptions list", func() { - for domain := range v1.LabelDomainExceptions { + It("should label nodes with labels in the kubernetes domains", func() { + for domain := range v1.K8sLabelDomains { nodePool := test.NodePool(v1.NodePool{ Spec: v1.NodePoolSpec{ Template: v1.NodeClaimTemplate{ @@ -1258,8 +1258,8 @@ var _ = Describe("Provisioning", func() { Expect(node.Labels).To(HaveKeyWithValue(domain+"/test", "test-value")) } }) - It("should label nodes with labels in the subdomain from LabelDomainExceptions list", func() { - for domain := range v1.LabelDomainExceptions { + It("should label nodes with labels in the subdomain from kubernetes domains", func() { + for domain := range v1.K8sLabelDomains { nodePool := test.NodePool(v1.NodePool{ Spec: v1.NodePoolSpec{ Template: v1.NodeClaimTemplate{