diff --git a/cmd/kops/delete_instance.go b/cmd/kops/delete_instance.go
index 0f8a0ab9e5b98..a442552453b0a 100644
--- a/cmd/kops/delete_instance.go
+++ b/cmd/kops/delete_instance.go
@@ -219,7 +219,6 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	d := &instancegroups.RollingUpdateCluster{
 		Clientset: clientSet,
 		Cluster: cluster,
-		Ctx: ctx,
 		MasterInterval: 0,
 		NodeInterval: 0,
 		BastionInterval: 0,
@@ -248,7 +247,7 @@ func RunDeleteInstance(ctx context.Context, f *util.Factory, out io.Writer, opti
 	}
 	d.ClusterValidator = clusterValidator

-	return d.UpdateSingleInstance(cloudMember, options.Surge)
+	return d.UpdateSingleInstance(ctx, cloudMember, options.Surge)
 }

 func getNodes(ctx context.Context, cluster *kopsapi.Cluster, verbose bool) (kubernetes.Interface, string, []v1.Node, error) {
diff --git a/cmd/kops/rolling-update_cluster.go b/cmd/kops/rolling-update_cluster.go
index 7cd40abaedec8..bddb6fee957c4 100644
--- a/cmd/kops/rolling-update_cluster.go
+++ b/cmd/kops/rolling-update_cluster.go
@@ -348,7 +348,6 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 	d := &instancegroups.RollingUpdateCluster{
 		Clientset: clientset,
-		Ctx: ctx,
 		Cluster: cluster,
 		MasterInterval: options.ControlPlaneInterval,
 		NodeInterval: options.NodeInterval,
@@ -456,7 +455,7 @@ func RunRollingUpdateCluster(ctx context.Context, f *util.Factory, out io.Writer
 	}
 	d.ClusterValidator = clusterValidator

-	return d.RollingUpdate(groups, list)
+	return d.RollingUpdate(ctx, groups, list)
 }

 func completeInstanceGroup(f commandutils.Factory, selectedInstanceGroups *[]string, selectedInstanceGroupRoles *[]string) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
diff --git a/pkg/instancegroups/instancegroups.go b/pkg/instancegroups/instancegroups.go
index e92904e46258f..4d8c623fcd16b 100644
--- a/pkg/instancegroups/instancegroups.go
+++ b/pkg/instancegroups/instancegroups.go
@@ -98,7 +98,7 @@ func promptInteractive(upgradedHostID, upgradedHostName string) (stopPrompting b
 }

 // RollingUpdate performs a rolling update on a list of instances.
-func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) {
+func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(ctx context.Context, group *cloudinstances.CloudInstanceGroup, sleepAfterTerminate time.Duration) (err error) {
 	isBastion := group.InstanceGroup.IsBastion()
 	// Do not need a k8s client if you are doing cloudonly.
 	if c.K8sClient == nil && !c.CloudOnly {
@@ -123,7 +123,7 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(group *cloudinstances.
 	}

 	if !c.CloudOnly {
-		err = c.taintAllNeedUpdate(group, update)
+		err = c.taintAllNeedUpdate(ctx, group, update)
 		if err != nil {
 			return err
 		}
@@ -221,7 +221,7 @@ func (c *RollingUpdateCluster) rollingUpdateInstanceGroup(group *cloudinstances.
 	for uIdx, u := range update {
 		go func(m *cloudinstances.CloudInstance) {
-			terminateChan <- c.drainTerminateAndWait(m, sleepAfterTerminate)
+			terminateChan <- c.drainTerminateAndWait(ctx, m, sleepAfterTerminate)
 		}(u)
 		runningDrains++
@@ -319,7 +319,7 @@ func waitForPendingBeforeReturningError(runningDrains int, terminateChan chan er
 	return err
 }

-func (c *RollingUpdateCluster) taintAllNeedUpdate(group *cloudinstances.CloudInstanceGroup, update []*cloudinstances.CloudInstance) error {
+func (c *RollingUpdateCluster) taintAllNeedUpdate(ctx context.Context, group *cloudinstances.CloudInstanceGroup, update []*cloudinstances.CloudInstance) error {
 	var toTaint []*corev1.Node
 	for _, u := range update {
 		if u.Node != nil && !u.Node.Spec.Unschedulable {
@@ -341,7 +341,7 @@ func (c *RollingUpdateCluster) taintAllNeedUpdate(group *cloudinstances.CloudIns
 	}
 	klog.Infof("Tainting %d %s in %q instancegroup.", len(toTaint), noun, group.InstanceGroup.Name)
 	for _, n := range toTaint {
-		if err := c.patchTaint(n); err != nil {
+		if err := c.patchTaint(ctx, n); err != nil {
 			if c.FailOnDrainError {
 				return fmt.Errorf("failed to taint node %q: %v", n, err)
 			}
@@ -352,7 +352,7 @@ func (c *RollingUpdateCluster) taintAllNeedUpdate(group *cloudinstances.CloudIns
 	return nil
 }

-func (c *RollingUpdateCluster) patchTaint(node *corev1.Node) error {
+func (c *RollingUpdateCluster) patchTaint(ctx context.Context, node *corev1.Node) error {
 	oldData, err := json.Marshal(node)
 	if err != nil {
 		return err
@@ -373,14 +373,14 @@ func (c *RollingUpdateCluster) patchTaint(node *corev1.Node) error {
 		return err
 	}

-	_, err = c.K8sClient.CoreV1().Nodes().Patch(c.Ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	_, err = c.K8sClient.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	return err
 }

-func (c *RollingUpdateCluster) patchExcludeFromLB(node *corev1.Node) error {
+func (c *RollingUpdateCluster) patchExcludeFromLB(ctx context.Context, node *corev1.Node) error {
 	oldData, err := json.Marshal(node)
 	if err != nil {
 		return err
@@ -405,14 +405,14 @@ func (c *RollingUpdateCluster) patchExcludeFromLB(node *corev1.Node) error {
 		return err
 	}

-	_, err = c.K8sClient.CoreV1().Nodes().Patch(c.Ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	_, err = c.K8sClient.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	if apierrors.IsNotFound(err) {
 		return nil
 	}
 	return err
 }

-func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInstance, sleepAfterTerminate time.Duration) error {
+func (c *RollingUpdateCluster) drainTerminateAndWait(ctx context.Context, u *cloudinstances.CloudInstance, sleepAfterTerminate time.Duration) error {
 	instanceID := u.ID

 	nodeName := ""
@@ -430,7 +430,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInst
 	if u.Node != nil {
 		klog.Infof("Draining the node: %q.", nodeName)

-		if err := c.drainNode(u); err != nil {
+		if err := c.drainNode(ctx, u); err != nil {
 			if c.FailOnDrainError {
 				return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
 			}
@@ -449,7 +449,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInst
 			klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceID)
 		} else {
 			klog.Infof("deleting node %q from kubernetes", nodeName)
-			if err := c.deleteNode(u.Node); err != nil {
+			if err := c.deleteNode(ctx, u.Node); err != nil {
 				return fmt.Errorf("error deleting node %q: %v", nodeName, err)
 			}
 		}
@@ -460,7 +460,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInst
 		return err
 	}

-	if err := c.reconcileInstanceGroup(); err != nil {
+	if err := c.reconcileInstanceGroup(ctx); err != nil {
 		klog.Errorf("error reconciling instance group %q: %v", u.CloudInstanceGroup.HumanName, err)
 		return err
 	}
@@ -472,7 +472,7 @@ func (c *RollingUpdateCluster) drainTerminateAndWait(u *cloudinstances.CloudInst
 	return nil
 }

-func (c *RollingUpdateCluster) reconcileInstanceGroup() error {
+func (c *RollingUpdateCluster) reconcileInstanceGroup(ctx context.Context) error {
 	if c.Cluster.GetCloudProvider() != api.CloudProviderOpenstack &&
 		c.Cluster.GetCloudProvider() != api.CloudProviderHetzner &&
 		c.Cluster.GetCloudProvider() != api.CloudProviderScaleway &&
@@ -497,7 +497,7 @@ func (c *RollingUpdateCluster) reconcileInstanceGroup() error {
 		DeletionProcessing: fi.DeletionProcessingModeDeleteIfNotDeferrred,
 	}

-	_, err := applyCmd.Run(c.Ctx)
+	_, err := applyCmd.Run(ctx)

 	return err
 }
@@ -645,7 +645,7 @@ func (c *RollingUpdateCluster) deleteInstance(u *cloudinstances.CloudInstance) e
 }

 // drainNode drains a K8s node.
-func (c *RollingUpdateCluster) drainNode(u *cloudinstances.CloudInstance) error {
+func (c *RollingUpdateCluster) drainNode(ctx context.Context, u *cloudinstances.CloudInstance) error {
 	if c.K8sClient == nil {
 		return fmt.Errorf("K8sClient not set")
 	}
@@ -659,7 +659,7 @@ func (c *RollingUpdateCluster) drainNode(u *cloudinstances.CloudInstance) error
 	}

 	helper := &drain.Helper{
-		Ctx: c.Ctx,
+		Ctx: ctx,
 		Client: c.K8sClient,
 		Force: true,
 		GracePeriodSeconds: -1,
@@ -679,7 +679,7 @@ func (c *RollingUpdateCluster) drainNode(u *cloudinstances.CloudInstance) error
 		return fmt.Errorf("error cordoning node: %v", err)
 	}

-	if err := c.patchExcludeFromLB(u.Node); err != nil {
+	if err := c.patchExcludeFromLB(ctx, u.Node); err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil
 		}
@@ -720,9 +720,9 @@ func (c *RollingUpdateCluster) drainNode(u *cloudinstances.CloudInstance) error
 }

 // deleteNode deletes a node from the k8s API. It does not delete the underlying instance.
-func (c *RollingUpdateCluster) deleteNode(node *corev1.Node) error {
+func (c *RollingUpdateCluster) deleteNode(ctx context.Context, node *corev1.Node) error {
 	var options metav1.DeleteOptions
-	err := c.K8sClient.CoreV1().Nodes().Delete(c.Ctx, node.Name, options)
+	err := c.K8sClient.CoreV1().Nodes().Delete(ctx, node.Name, options)
 	if err != nil {
 		if apierrors.IsNotFound(err) {
 			return nil
@@ -735,7 +735,7 @@ func (c *RollingUpdateCluster) deleteNode(node *corev1.Node) error {
 }

 // UpdateSingleInstance performs a rolling update on a single instance
-func (c *RollingUpdateCluster) UpdateSingleInstance(cloudMember *cloudinstances.CloudInstance, detach bool) error {
+func (c *RollingUpdateCluster) UpdateSingleInstance(ctx context.Context, cloudMember *cloudinstances.CloudInstance, detach bool) error {
 	if detach {
 		if cloudMember.CloudInstanceGroup.InstanceGroup.IsControlPlane() {
 			klog.Warning("cannot detach control-plane instances. Assuming --surge=false")
@@ -750,5 +750,5 @@ func (c *RollingUpdateCluster) UpdateSingleInstance(cloudMember *cloudinstances.
 		}
 	}

-	return c.drainTerminateAndWait(cloudMember, 0)
+	return c.drainTerminateAndWait(ctx, cloudMember, 0)
 }
diff --git a/pkg/instancegroups/instancegroups_test.go b/pkg/instancegroups/instancegroups_test.go
index 728adfbf061de..12dce20b4f5e3 100644
--- a/pkg/instancegroups/instancegroups_test.go
+++ b/pkg/instancegroups/instancegroups_test.go
@@ -60,7 +60,7 @@ func TestWarmPoolOnlyRoll(t *testing.T) {
 	instance.State = cloudinstances.WarmPool

 	{
-		err := c.rollingUpdateInstanceGroup(group, 0*time.Second)
+		err := c.rollingUpdateInstanceGroup(ctx, group, 0*time.Second)
 		if err != nil {
 			t.Fatalf("could not roll instance group: %v", err)
 		}
diff --git a/pkg/instancegroups/rollingupdate.go b/pkg/instancegroups/rollingupdate.go
index f361d3db2de92..823b65838c70b 100644
--- a/pkg/instancegroups/rollingupdate.go
+++ b/pkg/instancegroups/rollingupdate.go
@@ -38,7 +38,6 @@ import (
 // RollingUpdateCluster is a struct containing cluster information for a rolling update.
 type RollingUpdateCluster struct {
 	Clientset simple.Clientset
-	Ctx       context.Context
 	Cluster   *api.Cluster
 	Cloud     fi.Cloud
@@ -106,7 +105,7 @@ func (*RollingUpdateCluster) AdjustNeedUpdate(groups map[string]*cloudinstances.
 }

 // RollingUpdate performs a rolling update on a K8s Cluster.
-func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.CloudInstanceGroup, instanceGroups *api.InstanceGroupList) error {
+func (c *RollingUpdateCluster) RollingUpdate(ctx context.Context, groups map[string]*cloudinstances.CloudInstanceGroup, instanceGroups *api.InstanceGroupList) error {
 	if len(groups) == 0 {
 		klog.Info("Cloud Instance Group length is zero. Not doing a rolling-update.")
 		return nil
@@ -147,7 +146,7 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 			defer wg.Done()

-			err := c.rollingUpdateInstanceGroup(bastionGroups[k], c.BastionInterval)
+			err := c.rollingUpdateInstanceGroup(ctx, bastionGroups[k], c.BastionInterval)

 			resultsMutex.Lock()
 			results[k] = err
@@ -172,7 +171,7 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 	// and we don't want to roll all the control-plane nodes at the same time. See issue #284

 	for _, k := range sortGroups(masterGroups) {
-		err := c.rollingUpdateInstanceGroup(masterGroups[k], c.MasterInterval)
+		err := c.rollingUpdateInstanceGroup(ctx, masterGroups[k], c.MasterInterval)

 		// Do not continue update if control-plane node(s) failed; cluster is potentially in an unhealthy state.
 		if err != nil {
 			return fmt.Errorf("control-plane node not healthy after update, stopping rolling-update: %q", err)
@@ -187,7 +186,7 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 	}

 	for _, k := range sortGroups(apiServerGroups) {
-		err := c.rollingUpdateInstanceGroup(apiServerGroups[k], c.NodeInterval)
+		err := c.rollingUpdateInstanceGroup(ctx, apiServerGroups[k], c.NodeInterval)

 		results[k] = err

 		if err != nil {
 			klog.Errorf("failed to roll InstanceGroup %q: %v", k, err)
@@ -212,7 +211,7 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*cloudinstances.C
 	}

 	for _, k := range sortGroups(nodeGroups) {
-		err := c.rollingUpdateInstanceGroup(nodeGroups[k], c.NodeInterval)
+		err := c.rollingUpdateInstanceGroup(ctx, nodeGroups[k], c.NodeInterval)

 		results[k] = err

 		if err != nil {
 			klog.Errorf("failed to roll InstanceGroup %q: %v", k, err)
diff --git a/pkg/instancegroups/rollingupdate_os_test.go b/pkg/instancegroups/rollingupdate_os_test.go
index 698984d587365..994129a9309ef 100644
--- a/pkg/instancegroups/rollingupdate_os_test.go
+++ b/pkg/instancegroups/rollingupdate_os_test.go
@@ -83,7 +83,6 @@ func getTestSetupOS(t *testing.T, ctx context.Context) (*RollingUpdateCluster, *
 		ValidateTickDuration: 1 * time.Millisecond,
 		ValidateSuccessDuration: 5 * time.Millisecond,
 		ValidateCount: 2,
-		Ctx: ctx,
 		Cluster: cluster,
 		Clientset: clientset,
 	}
@@ -106,7 +105,7 @@ func TestRollingUpdateDisabledSurgeOS(t *testing.T) {
 	c, cloud := getTestSetupOS(t, ctx)
 	groups, igList := getGroupsAllNeedUpdateOS(t, c)

-	err := c.RollingUpdate(groups, igList)
+	err := c.RollingUpdate(ctx, groups, igList)
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCountOS(t, cloud, "node-1", 3)
@@ -117,6 +116,8 @@ func TestRollingUpdateDisabledSurgeOS(t *testing.T) {
 }

 func makeGroupOS(t *testing.T, groups map[string]*cloudinstances.CloudInstanceGroup, igList *kopsapi.InstanceGroupList, c *RollingUpdateCluster, subnet string, role kopsapi.InstanceGroupRole, count int, needUpdate int) {
+	ctx := context.TODO()
+
 	cloud := c.Cloud.(*openstack.MockCloud)
 	igif := c.Clientset.InstanceGroupsFor(c.Cluster)
 	fakeClient := c.K8sClient.(*fake.Clientset)
@@ -136,7 +137,7 @@ func makeGroupOS(t *testing.T, groups map[string]*cloudinstances.CloudInstanceGr
 	igList.Items = append(igList.Items, newIg)

-	ig, err := igif.Create(c.Ctx, &newIg, v1meta.CreateOptions{})
+	ig, err := igif.Create(ctx, &newIg, v1meta.CreateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to create ig %v: %v", subnet, err)
 	}
diff --git a/pkg/instancegroups/rollingupdate_test.go b/pkg/instancegroups/rollingupdate_test.go
index ca92e14a85bab..0eb2e6b10dda7 100644
--- a/pkg/instancegroups/rollingupdate_test.go
+++ b/pkg/instancegroups/rollingupdate_test.go
@@ -65,7 +65,6 @@ func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud) {
 	cluster.Name = "test.k8s.local"

 	c := &RollingUpdateCluster{
-		Ctx: context.Background(),
 		Cluster: cluster,
 		Cloud: mockcloud,
 		MasterInterval: 1 * time.Millisecond,
@@ -213,7 +212,7 @@ func TestRollingUpdateAllNeedUpdate(t *testing.T) {
 	c, cloud := getTestSetup()

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -269,7 +268,7 @@ func TestRollingUpdateAllNeedUpdateCloudonly(t *testing.T) {
 	c.ClusterValidator = &assertNotCalledClusterValidator{T: t}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assert.Empty(t, c.K8sClient.(*fake.Clientset).Actions())
@@ -288,7 +287,7 @@ func TestRollingUpdateAllNeedUpdateNoFailOnValidate(t *testing.T) {
 	c.ClusterValidator = &failingClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(ctx, &autoscaling.DescribeAutoScalingGroupsInput{})
@@ -298,10 +297,11 @@
 }

 func TestRollingUpdateNoneNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()
 	groups := getGroups(c.K8sClient, cloud)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assert.Empty(t, c.K8sClient.(*fake.Clientset).Actions())
@@ -319,7 +319,7 @@ func TestRollingUpdateNoneNeedUpdateWithForce(t *testing.T) {
 	c.Force = true

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(ctx, &autoscaling.DescribeAutoScalingGroupsInput{})
@@ -329,11 +329,12 @@
 }

 func TestRollingUpdateEmptyGroup(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -343,12 +344,13 @@
 }

 func TestRollingUpdateUnknownRole(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := getGroups(c.K8sClient, cloud)
 	groups["node-1"].InstanceGroup.Spec.Role = "Unknown"

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -358,12 +360,13 @@
 }

 func TestRollingUpdateAllNeedUpdateFailsValidation(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failingClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -373,12 +376,13 @@
 }

 func TestRollingUpdateAllNeedUpdateErrorsValidation(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &erroringClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -388,26 +392,28 @@ func TestRollingUpdateAllNeedUpdateErrorsValidation(t *testing.T) {
 }

 func TestRollingUpdateNodes1NeedsUpdateFailsValidation(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failingClusterValidator{}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
 }

 func TestRollingUpdateNodes1NeedsUpdateErrorsValidation(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &erroringClusterValidator{}

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -444,6 +450,7 @@ func (v *failAfterOneNodeClusterValidator) Validate() (*validation.ValidationClu
 }

 func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
@@ -453,7 +460,7 @@ func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -463,6 +470,7 @@ func TestRollingUpdateClusterFailsValidationAfterOneMaster(t *testing.T) {
 }

 func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
@@ -472,7 +480,7 @@ func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -482,6 +490,7 @@ func TestRollingUpdateClusterErrorsValidationAfterOneMaster(t *testing.T) {
 }

 func TestRollingUpdateNonRelatedInstanceGroupFailure(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -494,7 +503,7 @@ func TestRollingUpdateNonRelatedInstanceGroupFailure(t *testing.T) {
 		InstanceGroup: groups["node-2"].InstanceGroup,
 	}

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -504,6 +513,7 @@ func TestRollingUpdateNonRelatedInstanceGroupFailure(t *testing.T) {
 }

 func TestRollingUpdateRelatedInstanceGroupFailure(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -516,7 +526,7 @@ func TestRollingUpdateRelatedInstanceGroupFailure(t *testing.T) {
 		InstanceGroup: groups["node-1"].InstanceGroup,
 	}

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -526,6 +536,7 @@ func TestRollingUpdateRelatedInstanceGroupFailure(t *testing.T) {
 }

 func TestRollingUpdateMasterGroupFailure(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -538,7 +549,7 @@ func TestRollingUpdateMasterGroupFailure(t *testing.T) {
 		InstanceGroup: groups["master-1"].InstanceGroup,
 	}

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -548,6 +559,7 @@ func TestRollingUpdateMasterGroupFailure(t *testing.T) {
 }

 func TestRollingUpdateValidationErrorInstanceGroupNil(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -560,7 +572,7 @@ func TestRollingUpdateValidationErrorInstanceGroupNil(t *testing.T) {
 		InstanceGroup: nil,
 	}

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -570,6 +582,7 @@ func TestRollingUpdateValidationErrorInstanceGroupNil(t *testing.T) {
 }

 func TestRollingUpdateValidationErrorInstanceGroupExitableError(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -583,7 +596,7 @@ func TestRollingUpdateValidationErrorInstanceGroupExitableError(t *testing.T) {
 		InstanceGroup: groups["node-2"].InstanceGroup,
 	}

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -591,10 +604,10 @@ func TestRollingUpdateValidationErrorInstanceGroupExitableError(t *testing.T) {
 	assertGroupInstanceCount(t, cloud, "node-3", 3)
 	assertGroupInstanceCount(t, cloud, "master-1", 2)
 	assertGroupInstanceCount(t, cloud, "bastion-1", 1)
-
 }

 func TestRollingUpdateClusterFailsValidationAfterOneNode(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
@@ -605,13 +618,14 @@ func TestRollingUpdateClusterFailsValidationAfterOneNode(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 2)
 }

 func TestRollingUpdateClusterErrorsValidationAfterOneNode(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.ClusterValidator = &failAfterOneNodeClusterValidator{
@@ -622,7 +636,7 @@ func TestRollingUpdateClusterErrorsValidationAfterOneNode(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 3)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.Error(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 2)
@@ -665,6 +679,7 @@ func (v *flappingClusterValidator) Validate() (*validation.ValidationCluster, er
 }

 func TestRollingUpdateFlappingValidation(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	// This should only take a few milliseconds,
@@ -678,7 +693,7 @@ func TestRollingUpdateFlappingValidation(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -708,6 +723,7 @@ func (v *failThreeTimesClusterValidator) Validate() (*validation.ValidationClust
 }

 func TestRollingUpdateValidatesAfterBastion(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	// This should only take a few milliseconds,
@@ -718,7 +734,7 @@ func TestRollingUpdateValidatesAfterBastion(t *testing.T) {
 	c.ClusterValidator = &failThreeTimesClusterValidator{}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -825,11 +841,12 @@ func assertGroupNeedUpdate(t *testing.T, groups map[string]*cloudinstances.Cloud
 }

 func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 2)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -870,6 +887,7 @@ func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeIgnoredForMaster(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	two := intstr.FromInt(2)
@@ -879,7 +897,7 @@ func TestRollingUpdateMaxSurgeIgnoredForMaster(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleControlPlane, 3, 2)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	cordoned := ""
@@ -922,6 +940,7 @@ func TestRollingUpdateMaxSurgeIgnoredForMaster(t *testing.T) {
 }

 func TestRollingUpdateDisabled(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	c.CloudOnly = true
@@ -930,7 +949,7 @@
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -958,6 +977,7 @@ func (m *disabledSurgeTest) DetachInstances(ctx context.Context, input *autoscal
 }

 func TestRollingUpdateDisabledSurge(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	disabledSurgeTest := &disabledSurgeTest{
@@ -974,7 +994,7 @@ func TestRollingUpdateDisabledSurge(t *testing.T) {
 	}

 	groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 3)
@@ -1169,6 +1189,7 @@ func newConcurrentTest(t *testing.T, cloud *awsup.MockAWSCloud, numSurge int, al
 }

 func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, true)
@@ -1184,7 +1205,7 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 7)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1192,6 +1213,7 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, false)
@@ -1206,7 +1228,7 @@ func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1214,6 +1236,7 @@ func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 0, true)
@@ -1229,7 +1252,7 @@ func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleControlPlane, 7, 7)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "master-1", 0)
@@ -1266,6 +1289,7 @@ func (e *ec2IgnoreTags) CreateTags(ctx context.Context, params *ec2.CreateTagsIn
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 2, true)
@@ -1285,7 +1309,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 6, 6)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1293,6 +1317,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdate(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeAllButOneNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	concurrentTest := newConcurrentTest(t, cloud, 2, false)
@@ -1311,7 +1336,7 @@ func TestRollingUpdateMaxSurgeAllButOneNeedUpdate(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1329,6 +1354,7 @@ func (c *countDetach) DetachInstances(ctx context.Context, input *autoscaling.De
 }

 func TestRollingUpdateMaxSurgeGreaterThanNeedUpdate(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	countDetach := &countDetach{AutoScalingAPI: cloud.MockAutoscaling}
@@ -1342,7 +1368,7 @@ func TestRollingUpdateMaxSurgeGreaterThanNeedUpdate(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 2)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1358,6 +1384,7 @@ func (m *failDetachAutoscaling) DetachInstances(ctx context.Context, input *auto
 }

 func TestRollingUpdateDetachFails(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	cloud.MockAutoscaling = &failDetachAutoscaling{AutoScalingAPI: cloud.MockAutoscaling}
@@ -1370,7 +1397,7 @@ func TestRollingUpdateDetachFails(t *testing.T) {
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 3, 2)

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 1)
@@ -1474,6 +1501,7 @@ func (m *alreadyDetachedTestAutoscaling) DetachInstances(ctx context.Context, in
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	alreadyDetachedTest := &alreadyDetachedTest{
@@ -1500,7 +1528,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
 	makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 4, 4)
 	alreadyDetachedTest.detached[groups["node-1"].NeedUpdate[3].ID] = true
 	groups["node-1"].NeedUpdate[3].Status = cloudinstances.CloudInstanceStatusDetached
-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1508,6 +1536,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateOneAlreadyDetached(t *testing.T) {
 }

 func TestRollingUpdateMaxSurgeAllNeedUpdateMaxAlreadyDetached(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()

 	// Should behave the same as TestRollingUpdateMaxUnavailableAllNeedUpdate
@@ -1532,7 +1561,7 @@ func TestRollingUpdateMaxSurgeAllNeedUpdateMaxAlreadyDetached(t *testing.T) {
 	groups["node-1"].NeedUpdate[6].Status = cloudinstances.CloudInstanceStatusNeedsUpdate
 	// TODO verify those are the last two instances terminated

-	err := c.RollingUpdate(groups, &kopsapi.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kopsapi.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")

 	assertGroupInstanceCount(t, cloud, "node-1", 0)
@@ -1555,7 +1584,7 @@ func assertTaint(t *testing.T, action testingclient.PatchAction) {
 }

 func assertGroupInstanceCount(t *testing.T, cloud awsup.AWSCloud, groupName string, expected int) {
-	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(context.Background(), &autoscaling.DescribeAutoScalingGroupsInput{
+	asgGroups, _ := cloud.Autoscaling().DescribeAutoScalingGroups(context.TODO(), &autoscaling.DescribeAutoScalingGroupsInput{
 		AutoScalingGroupNames: []string{groupName},
 	})
 	for _, group := range asgGroups.AutoScalingGroups {
diff --git a/pkg/instancegroups/rollingupdate_warmpool_test.go b/pkg/instancegroups/rollingupdate_warmpool_test.go
index 9d31c5889ec99..927753122a9d3 100644
--- a/pkg/instancegroups/rollingupdate_warmpool_test.go
+++ b/pkg/instancegroups/rollingupdate_warmpool_test.go
@@ -35,6 +35,7 @@ import (
 // Here we have three nodes that are up to date, while three warm nodes need updating.
 // Only the initial cluster validation should be run
 func TestRollingUpdateOnlyWarmPoolNodes(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()
 	k8sClient := c.K8sClient
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -45,12 +46,13 @@ func TestRollingUpdateOnlyWarmPoolNodes(t *testing.T) {
 	assert.Equal(t, 3, len(groups["node-1"].NeedUpdate), "number of nodes needing update")

-	err := c.RollingUpdate(groups, &kops.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kops.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")
 	assert.Equal(t, 1, validator.numValidations, "number of validations")
 }

 func TestRollingWarmPoolBeforeJoinedNodes(t *testing.T) {
+	ctx := context.TODO()
 	c, cloud := getTestSetup()
 	k8sClient := c.K8sClient
 	groups := make(map[string]*cloudinstances.CloudInstanceGroup)
@@ -62,7 +64,7 @@ func TestRollingWarmPoolBeforeJoinedNodes(t *testing.T) {
 	}
 	cloud.MockEC2 = warmPoolBeforeJoinedNodesTest

-	err := c.RollingUpdate(groups, &kops.InstanceGroupList{})
+	err := c.RollingUpdate(ctx, groups, &kops.InstanceGroupList{})
 	assert.NoError(t, err, "rolling update")
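
Note (not part of the patch above): the change follows the common Go guidance of not storing a context.Context in a struct. RollingUpdateCluster loses its Ctx field, and RollingUpdate, UpdateSingleInstance, and the internal helpers now receive ctx as an explicit first parameter. Below is a minimal, self-contained sketch of that calling pattern; the updater type and rollOne function are hypothetical names used only for illustration, not kops APIs.

package main

import (
	"context"
	"fmt"
	"time"
)

// updater mirrors the shape of RollingUpdateCluster after the change:
// it carries configuration, but no stored context.
type updater struct {
	interval time.Duration
}

// rollOne takes the caller's context explicitly, so cancellation and
// deadlines propagate instead of being frozen into the struct at construction time.
func (u *updater) rollOne(ctx context.Context, name string) error {
	select {
	case <-time.After(u.interval):
		fmt.Println("rolled", name)
		return nil
	case <-ctx.Done():
		// Stop promptly if the caller cancels or the deadline passes.
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	u := &updater{interval: 10 * time.Millisecond}
	if err := u.rollOne(ctx, "node-1"); err != nil {
		fmt.Println("rolling update failed:", err)
	}
}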