From 0a7121e5fdf6edfa515555d3fae2582cd7886df0 Mon Sep 17 00:00:00 2001
From: Mikkel Oscar Lyderik Larsen
Date: Fri, 1 Nov 2024 14:14:21 +0100
Subject: [PATCH] Add a cluster method Name to get canonical clusterName based
 on provider (#829)

* Add a template function to get canonical clusterName based on provider

Signed-off-by: Mikkel Oscar Lyderik Larsen

* Fix Cluster Name in all situations

Signed-off-by: Mikkel Oscar Lyderik Larsen

---------

Signed-off-by: Mikkel Oscar Lyderik Larsen
---
 api/cluster.go                     | 20 +++++++++---
 api/cluster_test.go                | 18 ++++++++++
 cmd/clm/main.go                    |  9 ++---
 controller/controller.go           |  6 ++--
 controller/controller_test.go      | 56 +++++++++++++++---------------
 pkg/updatestrategy/aws_asg.go      | 12 ++++----
 pkg/updatestrategy/aws_asg_test.go |  9 +++--
 pkg/updatestrategy/aws_ec2.go      | 12 ++++----
 provisioner/clusterpy.go           | 12 +++----
 provisioner/node_pools.go          | 14 ++++----
 provisioner/provisioner.go         | 10 ------
 provisioner/template_test.go       | 35 +++++++++++++++++++
 provisioner/zalando_aws.go         |  2 +-
 provisioner/zalando_eks.go         |  2 +-
 registry/http.go                   |  2 +-
 15 files changed, 141 insertions(+), 78 deletions(-)

diff --git a/api/cluster.go b/api/cluster.go
index ca8467eb..988cf3b0 100644
--- a/api/cluster.go
+++ b/api/cluster.go
@@ -12,8 +12,16 @@ import (
 	"github.com/zalando-incubator/cluster-lifecycle-manager/channel"
 )
 
+// ProviderID is a string that identifies a cluster provider.
+type ProviderID string
+
 const (
 	overrideChannelConfigItem = "override_channel"
+
+	// ZalandoAWSProvider is the provider ID for Zalando managed AWS clusters.
+	ZalandoAWSProvider ProviderID = "zalando-aws"
+	// ZalandoEKSProvider is the provider ID for AWS EKS clusters.
+	ZalandoEKSProvider ProviderID = "zalando-eks"
 )
 
 // Cluster describes a kubernetes cluster and related configuration.
@@ -29,7 +37,7 @@ type Cluster struct {
 	LifecycleStatus string         `json:"lifecycle_status" yaml:"lifecycle_status"`
 	LocalID         string         `json:"local_id" yaml:"local_id"`
 	NodePools       []*NodePool    `json:"node_pools" yaml:"node_pools"`
-	Provider        string         `json:"provider" yaml:"provider"`
+	Provider        ProviderID     `json:"provider" yaml:"provider"`
 	Region          string         `json:"region" yaml:"region"`
 	Status          *ClusterStatus `json:"status" yaml:"status"`
 	Owner           string         `json:"owner" yaml:"owner"`
@@ -73,7 +81,7 @@ func (cluster *Cluster) Version(channelVersion channel.ConfigVersion) (*ClusterV
 	if err != nil {
 		return nil, err
 	}
-	_, err = state.WriteString(cluster.Provider)
+	_, err = state.WriteString(string(cluster.Provider))
 	if err != nil {
 		return nil, err
 	}
@@ -195,3 +203,11 @@ func (cluster *Cluster) ASGBackedPools() []*NodePool {
 	}
 	return cp
 }
+
+// Name returns the canonical cluster name: LocalID for EKS clusters, ID otherwise.
+func (cluster Cluster) Name() string {
+	if cluster.Provider == ZalandoEKSProvider {
+		return cluster.LocalID
+	}
+	return cluster.ID
+}
diff --git a/api/cluster_test.go b/api/cluster_test.go
index bd61d3df..6fe29521 100644
--- a/api/cluster_test.go
+++ b/api/cluster_test.go
@@ -116,3 +116,21 @@ func TestVersion(t *testing.T) {
 		require.NotEqual(t, version, newVersion, "cluster field: %s", field)
 	}
 }
+
+func TestName(t *testing.T) {
+	cluster := &Cluster{
+		ID:       "aws:123456789012:eu-central-1:test-cluster",
+		LocalID:  "test-cluster",
+		Provider: ZalandoAWSProvider,
+	}
+
+	require.Equal(t, cluster.ID, cluster.Name())
+
+	cluster = &Cluster{
+		ID:       "aws:123456789012:eu-central-1:test-cluster",
+		LocalID:  "test-cluster",
+		Provider: ZalandoEKSProvider,
+	}
+
+	require.Equal(t, cluster.LocalID, cluster.Name())
+}
diff --git a/cmd/clm/main.go b/cmd/clm/main.go
index 9e6ebfdb..178deeea 100644
--- a/cmd/clm/main.go
+++ b/cmd/clm/main.go
@@ -12,6 +12,7 @@ import (
 	kingpin "github.com/alecthomas/kingpin/v2"
 	log "github.com/sirupsen/logrus"
 
+	"github.com/zalando-incubator/cluster-lifecycle-manager/api"
 	"github.com/zalando-incubator/cluster-lifecycle-manager/channel"
 	"github.com/zalando-incubator/cluster-lifecycle-manager/config"
 	"github.com/zalando-incubator/cluster-lifecycle-manager/controller"
@@ -115,8 +116,8 @@ func main() {
 
 	execManager := command.NewExecManager(cfg.ConcurrentExternalProcesses)
 
-	provisioners := map[provisioner.ProviderID]provisioner.Provisioner{
-		provisioner.ZalandoAWSProvider: provisioner.NewZalandoAWSProvisioner(
+	provisioners := map[api.ProviderID]provisioner.Provisioner{
+		api.ZalandoAWSProvider: provisioner.NewZalandoAWSProvisioner(
 			execManager,
 			clusterTokenSource,
 			secretDecrypter,
@@ -130,7 +131,7 @@ func main() {
 				ManageEtcdStack: cfg.ManageEtcdStack,
 			},
 		),
-		provisioner.ZalandoEKSProvider: provisioner.NewZalandoEKSProvisioner(
+		api.ZalandoEKSProvider: provisioner.NewZalandoEKSProvisioner(
 			execManager,
 			secretDecrypter,
 			cfg.AssumedRole,
@@ -218,7 +219,7 @@ func main() {
 			cluster.ConfigItems[key] = decryptedValue
 		}
 
-		p, ok := provisioners[provisioner.ProviderID(cluster.Provider)]
+		p, ok := provisioners[cluster.Provider]
 		if !ok {
 			log.Fatalf(
 				"Cluster %s: unknown provider %q",
diff --git a/controller/controller.go b/controller/controller.go
index 692dbb7d..b9822bba 100644
--- a/controller/controller.go
+++ b/controller/controller.go
@@ -44,7 +44,7 @@ type Controller struct {
 	logger               *log.Entry
 	execManager          *command.ExecManager
 	registry             registry.Registry
-	provisioners         map[provisioner.ProviderID]provisioner.Provisioner
+	provisioners         map[api.ProviderID]provisioner.Provisioner
 	providers            []string
 	channelConfigSourcer channel.ConfigSource
 	interval             time.Duration
@@ -58,7 +58,7 @@ func New(
 	logger *log.Entry,
 	execManager *command.ExecManager,
 	registry registry.Registry,
-	provisioners map[provisioner.ProviderID]provisioner.Provisioner,
+	provisioners map[api.ProviderID]provisioner.Provisioner,
 	channelConfigSourcer channel.ConfigSource,
 	options *Options,
 ) *Controller {
@@ -184,7 +184,7 @@ func (c *Controller) doProcessCluster(ctx context.Context, logger *log.Entry, cl
 		}
 	}()
 
-	provisioner, ok := c.provisioners[provisioner.ProviderID(cluster.Provider)]
+	provisioner, ok := c.provisioners[cluster.Provider]
 	if !ok {
 		return fmt.Errorf(
 			"cluster %s: unknown provider %q",
diff --git a/controller/controller_test.go b/controller/controller_test.go
index ab43c734..299e4b78 100644
--- a/controller/controller_test.go
+++ b/controller/controller_test.go
@@ -18,9 +18,9 @@ import (
 )
 
 const (
-	nextVersion                              = "version"
-	mockProvider      provisioner.ProviderID = ""
-	otherMockProvider provisioner.ProviderID = ""
+	nextVersion                      = "version"
+	mockProvider      api.ProviderID = ""
+	otherMockProvider api.ProviderID = ""
 )
 
 var defaultLogger = log.WithFields(map[string]interface{}{})
@@ -28,7 +28,7 @@ var defaultLogger = log.WithFields(map[string]interface{}{})
 type mockProvisioner struct{}
 
 func (p *mockProvisioner) Supports(cluster *api.Cluster) bool {
-	return cluster.Provider == string(mockProvider)
+	return cluster.Provider == mockProvider
 }
 
 func (p *mockProvisioner) Provision(
@@ -116,7 +116,7 @@ func createMockRegistry(lifecycleStatus string, status *api.ClusterStatus) *mock
 		Channel:         "alpha",
 		LifecycleStatus: lifecycleStatus,
 		Status:          status,
-		Provider:        string(mockProvider),
+		Provider:        mockProvider,
 	}
 	return &mockRegistry{theCluster: cluster}
 }
@@ -221,7 +221,7 @@ func TestProcessCluster(t *testing.T) {
 	for _, ti := range []struct {
 		testcase      string
 		registry      registry.Registry
-		provisioners  map[provisioner.ProviderID]provisioner.Provisioner
+		provisioners  map[api.ProviderID]provisioner.Provisioner
 		channelSource channel.ConfigSource
 		options       *Options
 		success       bool
@@ -229,7 +229,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status requested",
 			registry: createMockRegistry(statusRequested, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 
@@ -240,7 +240,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status requested, invalid provider",
 			registry: createMockRegistry(statusRequested, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				otherMockProvider: &mockProvisioner{},
 			},
 
@@ -251,7 +251,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status ready",
 			registry: createMockRegistry(statusReady, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 			channelSource: MockChannelSource(false, false),
@@ -261,7 +261,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status decommission-requested",
 			registry: createMockRegistry(statusDecommissionRequested, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 			channelSource: MockChannelSource(false, false),
@@ -271,7 +271,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status requested, provisioner.Create fails",
 			registry: createMockRegistry(statusRequested, &api.ClusterStatus{CurrentVersion: nextVersion}),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockErrCreateProvisioner{},
 			},
 			channelSource: MockChannelSource(false, false),
@@ -281,7 +281,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status ready, version up to date fails",
 			registry: createMockRegistry(statusReady, &api.ClusterStatus{CurrentVersion: nextVersion}),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 			channelSource: MockChannelSource(false, false),
@@ -291,7 +291,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status ready, provisioner.Version failing",
 			registry: createMockRegistry(statusReady, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockErrProvisioner{},
 			},
 			channelSource: MockChannelSource(false, false),
@@ -301,7 +301,7 @@ func TestProcessCluster(t *testing.T) {
 		{
 			testcase: "lifecycle status ready, channelSource.Version() fails",
 			registry: createMockRegistry(statusReady, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockErrProvisioner{},
 			},
 			channelSource: MockChannelSource(true, false),
@@ -310,7 +310,7 @@ func TestProcessCluster(t *testing.T) {
 		},
 		{
 			testcase: "lifecycle status ready, channelVersion.Get() fails",
 			registry: createMockRegistry(statusReady, nil),
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockErrProvisioner{},
 			},
 			channelSource: MockChannelSource(false, true),
@@ -352,7 +352,7 @@ func TestIgnoreUnsupportedProvider(t *testing.T) {
 		defaultLogger,
 		command.NewExecManager(1),
 		registry,
-		map[provisioner.ProviderID]provisioner.Provisioner{
+		map[api.ProviderID]provisioner.Provisioner{
 			mockProvider: &mockProvisioner{},
 		},
 		MockChannelSource(false, false),
@@ -371,36 +371,36 @@ func TestDropUnsupportedClusters(t *testing.T) {
 		testcase     string
 		clusters     []*api.Cluster
 		result       []*api.Cluster
-		provisioners map[provisioner.ProviderID]provisioner.Provisioner
+		provisioners map[api.ProviderID]provisioner.Provisioner
 	}{
 		{
 			testcase:     "empty result on empty input",
 			clusters:     []*api.Cluster{},
 			result:       []*api.Cluster{},
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{},
+			provisioners: map[api.ProviderID]provisioner.Provisioner{},
 		},
 		{
 			testcase:     "empty result on nil input",
 			clusters:     nil,
 			result:       []*api.Cluster{},
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{},
+			provisioners: map[api.ProviderID]provisioner.Provisioner{},
 		},
 		{
 			testcase: "same result when all clusters supported",
-			clusters: []*api.Cluster{{Provider: string(mockProvider)}},
-			result:   []*api.Cluster{{Provider: string(mockProvider)}},
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			clusters: []*api.Cluster{{Provider: mockProvider}},
+			result:   []*api.Cluster{{Provider: mockProvider}},
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 		},
 		{
 			testcase: "filters unsupported clusters",
 			clusters: []*api.Cluster{
-				{Provider: string(mockProvider)},
-				{Provider: string(otherMockProvider)},
+				{Provider: mockProvider},
+				{Provider: otherMockProvider},
 			},
-			result: []*api.Cluster{{Provider: string(mockProvider)}},
-			provisioners: map[provisioner.ProviderID]provisioner.Provisioner{
+			result: []*api.Cluster{{Provider: mockProvider}},
+			provisioners: map[api.ProviderID]provisioner.Provisioner{
 				mockProvider: &mockProvisioner{},
 			},
 		},
@@ -426,7 +426,7 @@ func TestCoalesceFailures(t *testing.T) {
 		defaultLogger,
 		command.NewExecManager(1),
 		registry,
-		map[provisioner.ProviderID]provisioner.Provisioner{
+		map[api.ProviderID]provisioner.Provisioner{
 			mockProvider: &mockCountingErrProvisioner{},
 		},
 		MockChannelSource(false, false),
@@ -454,7 +454,7 @@ func TestCoalesceFailures(t *testing.T) {
 		defaultLogger,
 		command.NewExecManager(1),
 		registry,
-		map[provisioner.ProviderID]provisioner.Provisioner{
+		map[api.ProviderID]provisioner.Provisioner{
 			mockProvider: &mockErrProvisioner{},
 		},
 		MockChannelSource(false, false),
diff --git a/pkg/updatestrategy/aws_asg.go b/pkg/updatestrategy/aws_asg.go
index 1fa9d528..504656de 100644
--- a/pkg/updatestrategy/aws_asg.go
+++ b/pkg/updatestrategy/aws_asg.go
@@ -50,17 +50,17 @@ type ASGNodePoolsBackend struct {
 	asgClient autoscalingiface.AutoScalingAPI
 	ec2Client ec2iface.EC2API
 	elbClient elbiface.ELBAPI
-	clusterID string
+	cluster   *api.Cluster
 }
 
-// NewASGNodePoolsBackend initializes a new ASGNodePoolsBackend for the given clusterID and AWS
-// session and.
-func NewASGNodePoolsBackend(clusterID string, sess *session.Session) *ASGNodePoolsBackend {
+// NewASGNodePoolsBackend initializes a new ASGNodePoolsBackend for the given cluster and AWS
+// session.
+func NewASGNodePoolsBackend(cluster *api.Cluster, sess *session.Session) *ASGNodePoolsBackend {
 	return &ASGNodePoolsBackend{
 		asgClient: autoscaling.New(sess),
 		ec2Client: ec2.New(sess),
 		elbClient: elb.New(sess),
-		clusterID: clusterID,
+		cluster:   cluster,
 	}
 }
@@ -432,7 +432,7 @@ func (n *ASGNodePoolsBackend) getNodePoolASGs(nodePool *api.NodePool) ([]*autosc
 
 	expectedTags := []*autoscaling.TagDescription{
 		{
-			Key:   aws.String(clusterIDTagPrefix + n.clusterID),
+			Key:   aws.String(clusterIDTagPrefix + n.cluster.Name()),
 			Value: aws.String(resourceLifecycleOwned),
 		},
 		{
diff --git a/pkg/updatestrategy/aws_asg_test.go b/pkg/updatestrategy/aws_asg_test.go
index d0c8aaa8..ee47cd51 100644
--- a/pkg/updatestrategy/aws_asg_test.go
+++ b/pkg/updatestrategy/aws_asg_test.go
@@ -248,7 +248,7 @@ func TestGet(tt *testing.T) {
 				asgClient: tc.asgClient,
 				ec2Client: tc.ec2Client,
 				elbClient: tc.elbClient,
-				clusterID: "",
+				cluster:   &api.Cluster{},
 			}
 
 			_, err := backend.Get(context.Background(), &api.NodePool{Name: "test"})
@@ -670,7 +670,7 @@ func TestGetInstanceUpdateInfo(t *testing.T) {
 				},
 			},
 		},
-		clusterID: "",
+		cluster:   &api.Cluster{},
 	}
 
 	res, err := backend.Get(context.Background(), &api.NodePool{Name: "test"})
@@ -689,6 +689,7 @@ func TestScale(t *testing.T) {
 	// test not getting the ASGs
 	backend := &ASGNodePoolsBackend{
 		asgClient: &mockASGAPI{},
+		cluster:   &api.Cluster{},
 	}
 	err := backend.Scale(context.Background(), &api.NodePool{Name: "test"}, 10)
 	assert.Error(t, err)
@@ -719,6 +720,7 @@ func TestScale(t *testing.T) {
 				},
 			},
 		},
+		cluster: &api.Cluster{},
 	}
 	err = backend.Scale(context.Background(), &api.NodePool{Name: "test"}, 10)
 	assert.NoError(t, err)
@@ -749,6 +751,7 @@ func TestScale(t *testing.T) {
 				},
 			},
 		},
+		cluster: &api.Cluster{},
 	}
 	err = backend.Scale(context.Background(), &api.NodePool{Name: "test"}, 1)
 	assert.NoError(t, err)
@@ -806,7 +809,7 @@ func TestDeleteTags(tt *testing.T) {
 		tt.Run(tc.msg, func(t *testing.T) {
 			backend := &ASGNodePoolsBackend{
 				asgClient: tc.asgClient,
-				clusterID: "",
+				cluster:   &api.Cluster{},
 			}
 
 			err := backend.deleteTags(tc.nodePool, tc.tags)
diff --git a/pkg/updatestrategy/aws_ec2.go b/pkg/updatestrategy/aws_ec2.go
index 5f8a1ef9..21c192f4 100644
--- a/pkg/updatestrategy/aws_ec2.go
+++ b/pkg/updatestrategy/aws_ec2.go
@@ -65,15 +65,15 @@ func InstanceConfigUpToDate(instanceConfig, poolConfig *InstanceConfig) bool {
 type EC2NodePoolBackend struct {
 	crdResolver *util.LazyOf[*KarpenterCRDNameResolver]
 	ec2Client   ec2iface.EC2API
-	clusterID   string
+	cluster     *api.Cluster
 }
 
 // NewEC2NodePoolBackend initializes a new EC2NodePoolBackend for
-// the given clusterID and AWS session and.
-func NewEC2NodePoolBackend(clusterID string, sess *session.Session, crdResolverInitializer func() (*KarpenterCRDNameResolver, error)) *EC2NodePoolBackend {
+// the given cluster and AWS session.
+func NewEC2NodePoolBackend(cluster *api.Cluster, sess *session.Session, crdResolverInitializer func() (*KarpenterCRDNameResolver, error)) *EC2NodePoolBackend {
 	return &EC2NodePoolBackend{
 		ec2Client:   ec2.New(sess),
-		clusterID:   clusterID,
+		cluster:     cluster,
 		crdResolver: util.NewLazyOf[*KarpenterCRDNameResolver](crdResolverInitializer),
 	}
 }
@@ -134,7 +134,7 @@ func (n *EC2NodePoolBackend) Get(ctx context.Context, nodePool *api.NodePool) (*
 func (n *EC2NodePoolBackend) filterWithNodePool(nodePool *api.NodePool) []*ec2.Filter {
 	return []*ec2.Filter{
 		{
-			Name: aws.String("tag:" + clusterIDTagPrefix + n.clusterID),
+			Name: aws.String("tag:" + clusterIDTagPrefix + n.cluster.Name()),
 			Values: []*string{
 				aws.String(resourceLifecycleOwned),
 			},
@@ -230,7 +230,7 @@ func (n *EC2NodePoolBackend) DecommissionKarpenterNodes(ctx context.Context) err
 	}
 	return n.decommission(ctx, []*ec2.Filter{
 		{
-			Name: aws.String("tag:" + clusterIDTagPrefix + n.clusterID),
+			Name: aws.String("tag:" + clusterIDTagPrefix + n.cluster.Name()),
 			Values: []*string{
 				aws.String(resourceLifecycleOwned),
 			},
diff --git a/provisioner/clusterpy.go b/provisioner/clusterpy.go
index bc2ab7f5..ea02acba 100644
--- a/provisioner/clusterpy.go
+++ b/provisioner/clusterpy.go
@@ -287,7 +287,7 @@ func (p *clusterpyProvisioner) provision(
 	}
 
 	// create or update the etcd stack
-	if p.manageEtcdStack && cluster.Provider == string(ZalandoAWSProvider) {
+	if p.manageEtcdStack && cluster.Provider == api.ZalandoAWSProvider {
 		etcdKMSKeyARN, err := awsAdapter.resolveKeyID(etcdKMSKeyAlias)
 		if err != nil {
 			return err
@@ -709,7 +709,7 @@ func (p *clusterpyProvisioner) decommission(
 	}
 
 	// decommission karpenter node-pools, since karpenter controller is decommissioned. we need to clean up ec2 resources
-	ec2Backend := updatestrategy.NewEC2NodePoolBackend(cluster.ID, awsAdapter.session, func() (*updatestrategy.KarpenterCRDNameResolver, error) {
+	ec2Backend := updatestrategy.NewEC2NodePoolBackend(cluster, awsAdapter.session, func() (*updatestrategy.KarpenterCRDNameResolver, error) {
 		k8sClients, err := kubernetes.NewClientsCollection(
 			cluster.APIServerURL,
 			tokenSource,
@@ -769,7 +769,7 @@ func (p *clusterpyProvisioner) decommission(
 }
 
 func (p *clusterpyProvisioner) removeEBSVolumes(awsAdapter *awsAdapter, cluster *api.Cluster) error {
-	clusterTag := fmt.Sprintf("kubernetes.io/cluster/%s", cluster.ID)
+	clusterTag := fmt.Sprintf("kubernetes.io/cluster/%s", cluster.Name())
 	volumes, err := awsAdapter.GetVolumes(map[string]string{clusterTag: "owned"})
 	if err != nil {
 		return err
@@ -954,12 +954,12 @@ func (p *clusterpyProvisioner) updater(
 	}
 
 	additionalBackends := map[string]updatestrategy.ProviderNodePoolsBackend{
-		karpenterNodePoolProfile: updatestrategy.NewEC2NodePoolBackend(cluster.ID, awsAdapter.session, func() (*updatestrategy.KarpenterCRDNameResolver, error) {
+		karpenterNodePoolProfile: updatestrategy.NewEC2NodePoolBackend(cluster, awsAdapter.session, func() (*updatestrategy.KarpenterCRDNameResolver, error) {
 			return updatestrategy.NewKarpenterCRDResolver(context.Background(), k8sClients)
 		}),
 	}
 
-	asgBackend := updatestrategy.NewASGNodePoolsBackend(cluster.ID, awsAdapter.session)
+	asgBackend := updatestrategy.NewASGNodePoolsBackend(cluster, awsAdapter.session)
 	poolBackend := updatestrategy.NewProfileNodePoolsBackend(asgBackend, additionalBackends)
 	poolManager := updatestrategy.NewKubernetesNodePoolManager(logger, client, poolBackend, drainConfig, noScheduleTaint)
 
@@ -1018,7 +1018,7 @@ func (p *clusterpyProvisioner) downscaleDeployments(
 
 func (p *clusterpyProvisioner) listClusterStacks(_ context.Context, adapter *awsAdapter, cluster *api.Cluster) ([]*cloudformation.Stack, error) {
 	includeTags := map[string]string{
-		tagNameKubernetesClusterPrefix + cluster.ID: resourceLifecycleOwned,
+		tagNameKubernetesClusterPrefix + cluster.Name(): resourceLifecycleOwned,
 	}
 	excludeTags := map[string]string{
 		mainStackTagKey: stackTagValueTrue,
diff --git a/provisioner/node_pools.go b/provisioner/node_pools.go
index 74acb314..4464189a 100644
--- a/provisioner/node_pools.go
+++ b/provisioner/node_pools.go
@@ -375,11 +375,11 @@ func (p *AWSNodePoolProvisioner) provisionNodePool(ctx context.Context, nodePool
 	stackName := fmt.Sprintf("nodepool-%s-%s", nodePool.Name, strings.Replace(p.cluster.ID, ":", "-", -1))
 
 	tags := map[string]string{
-		tagNameKubernetesClusterPrefix + p.cluster.ID: resourceLifecycleOwned,
-		nodePoolRoleTagKey:                            "true",
-		nodePoolTagKey:                                nodePool.Name,
-		nodePoolTagKeyLegacy:                          nodePool.Name,
-		nodePoolProfileTagKey:                         nodePool.Profile,
+		tagNameKubernetesClusterPrefix + p.cluster.Name(): resourceLifecycleOwned,
+		nodePoolRoleTagKey:    "true",
+		nodePoolTagKey:        nodePool.Name,
+		nodePoolTagKeyLegacy:  nodePool.Name,
+		nodePoolProfileTagKey: nodePool.Profile,
 	}
 
 	err = p.awsAdapter.applyStack(stackName, template, "", tags, true, nil)
@@ -402,8 +402,8 @@ func (p *AWSNodePoolProvisioner) provisionNodePool(ctx context.Context, nodePool
 func (p *AWSNodePoolProvisioner) Reconcile(ctx context.Context, updater updatestrategy.UpdateStrategy) error {
 	// decommission orphaned node pools
 	tags := map[string]string{
-		tagNameKubernetesClusterPrefix + p.cluster.ID: resourceLifecycleOwned,
-		nodePoolRoleTagKey:                            "true",
+		tagNameKubernetesClusterPrefix + p.cluster.Name(): resourceLifecycleOwned,
+		nodePoolRoleTagKey: "true",
 	}
 
 	nodePoolStacks, err := p.awsAdapter.ListStacks(tags, nil)
diff --git a/provisioner/provisioner.go b/provisioner/provisioner.go
index 3a4cb45b..d8efd13a 100644
--- a/provisioner/provisioner.go
+++ b/provisioner/provisioner.go
@@ -11,9 +11,6 @@ import (
 )
 
 type (
-	// A provider ID is a string that identifies a cluster provider.
-	ProviderID string
-
 	// Provisioner is an interface describing how to provision or decommission
 	// clusters.
 	Provisioner interface {
@@ -71,13 +68,6 @@ type (
 	}
 )
 
-const (
-	// ZalandoAWSProvider is the provider ID for Zalando managed AWS clusters.
-	ZalandoAWSProvider ProviderID = "zalando-aws"
-	// ZalandoEKSProvider is the provider ID for AWS EKS clusters.
-	ZalandoEKSProvider ProviderID = "zalando-eks"
-)
-
 var (
 	// ErrProviderNotSupported is the error returned from porvisioners if
 	// they don't support the cluster provider defined.
diff --git a/provisioner/template_test.go b/provisioner/template_test.go
index 54c7ecf0..d67b5ed5 100644
--- a/provisioner/template_test.go
+++ b/provisioner/template_test.go
@@ -1554,3 +1554,38 @@ func TestNthAddressFromCIDR(t *testing.T) {
 		})
 	}
 }
+
+func TestClusterName(t *testing.T) {
+	for _, tc := range []struct {
+		name     string
+		cluster  api.Cluster
+		input    string
+		expected string
+	}{
+		{
+			name: "zalando-aws cluster has cluster.Name == ID",
+			cluster: api.Cluster{
+				Provider: api.ZalandoAWSProvider,
+				ID:       "aws:12345678910:eu-central-1:zalando-aws",
+				LocalID:  "zalando-aws",
+			},
+			expected: "aws:12345678910:eu-central-1:zalando-aws",
+			input:    `{{ .Values.data.cluster.Name }}`,
+		},
+		{
+			name: "zalando-eks cluster has cluster.Name == LocalID",
+			cluster: api.Cluster{
+				Provider: api.ZalandoEKSProvider,
+				ID:       "aws:12345678910:eu-central-1:zalando-eks",
+				LocalID:  "zalando-eks",
+			},
+			expected: "zalando-eks",
+			input:    `{{ .Values.data.cluster.Name }}`,
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			result, _ := renderSingle(t, tc.input, map[string]interface{}{"cluster": tc.cluster})
+			require.EqualValues(t, tc.expected, result)
+		})
+	}
+}
diff --git a/provisioner/zalando_aws.go b/provisioner/zalando_aws.go
index 6965a964..c8372837 100644
--- a/provisioner/zalando_aws.go
+++ b/provisioner/zalando_aws.go
@@ -50,7 +50,7 @@ func NewZalandoAWSProvisioner(
 }
 
 func (z *ZalandoAWSProvisioner) Supports(cluster *api.Cluster) bool {
-	return cluster.Provider == string(ZalandoAWSProvider)
+	return cluster.Provider == api.ZalandoAWSProvider
 }
 
 func (z *ZalandoAWSProvisioner) Provision(
diff --git a/provisioner/zalando_eks.go b/provisioner/zalando_eks.go
index 4c00ca39..2dd4e58b 100644
--- a/provisioner/zalando_eks.go
+++ b/provisioner/zalando_eks.go
@@ -64,7 +64,7 @@ func NewZalandoEKSProvisioner(
 }
 
 func (z *ZalandoEKSProvisioner) Supports(cluster *api.Cluster) bool {
-	return cluster.Provider == string(ZalandoEKSProvider)
+	return cluster.Provider == api.ZalandoEKSProvider
 }
 
 func (z *ZalandoEKSProvisioner) Provision(
diff --git a/registry/http.go b/registry/http.go
index a2a8fc0b..e23c4365 100644
--- a/registry/http.go
+++ b/registry/http.go
@@ -214,7 +214,7 @@ func convertFromClusterModel(cluster *models.Cluster) (*api.Cluster, error) {
 		LifecycleStatus: *cluster.LifecycleStatus,
 		LocalID:         *cluster.LocalID,
 		NodePools:       nodePools,
-		Provider:        *cluster.Provider,
+		Provider:        api.ProviderID(*cluster.Provider),
 		Region:          *cluster.Region,
 		Status:          convertFromClusterStatusModel(cluster.Status),
 	}, nil
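
Usage note: the snippet below is a minimal sketch of how the new Name method
behaves from a caller's point of view, not part of the patch itself. The
import path is the one added to cmd/clm/main.go above; the cluster field
values are illustrative only.

package main

import (
	"fmt"

	"github.com/zalando-incubator/cluster-lifecycle-manager/api"
)

func main() {
	// Zalando-managed AWS clusters keep the fully qualified cluster ID as
	// their canonical name, so the AWS resource tags built from Name() are
	// identical to the ones previously built from cluster.ID.
	zalandoAWS := api.Cluster{
		Provider: api.ZalandoAWSProvider,
		ID:       "aws:123456789012:eu-central-1:test-cluster",
		LocalID:  "test-cluster",
	}
	fmt.Println(zalandoAWS.Name()) // aws:123456789012:eu-central-1:test-cluster

	// EKS clusters use the short LocalID as their canonical name instead,
	// so tags and templates resolve to the EKS-local name.
	eks := api.Cluster{
		Provider: api.ZalandoEKSProvider,
		ID:       "aws:123456789012:eu-central-1:test-cluster",
		LocalID:  "test-cluster",
	}
	fmt.Println(eks.Name()) // test-cluster
}

In templates the method is reachable as {{ .Values.data.cluster.Name }} (see
the TestClusterName cases above), so manifests can refer to the canonical
name without branching on the provider.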