From 6564ae6dc55078f53ac4d2e7a97c296441ac1655 Mon Sep 17 00:00:00 2001
From: Alexey Makhov
Date: Fri, 1 Nov 2024 15:47:05 +0200
Subject: [PATCH] Fix machine implementation updating. Fix downscaling test

Build the resource path for patching the infrastructure machine template
from the plural name reported by API discovery instead of lowercasing the
kind and appending "s", which produces wrong URLs for irregularly
pluralized kinds. The patch request goes through the typed clientset's
REST client, so the separate dynamic client is no longer needed and is
dropped from K0sController and main.go.

The downscaling test moves from the deprecated
wait.PollImmediateUntilWithContext to wait.PollUntilContextCancel, stops
running "k0s etcd leave" by hand before scaling down, and verifies the
result by polling the control plane node count instead of grepping
"k0s etcd member-list" output.

Signed-off-by: Alexey Makhov
---
 cmd/main.go                                   | 17 +++-------
 internal/controller/controlplane/helper.go    | 15 ++++++++---
 .../k0s_controlplane_controller.go            |  8 ++---
 ...pi_controlplane_docker_downscaling_test.go | 31 +++++--------------
 4 files changed, 28 insertions(+), 43 deletions(-)

diff --git a/cmd/main.go b/cmd/main.go
index 4c6d8c4e8..344a7047c 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -22,9 +22,6 @@ import (
 	"fmt"
 	"os"
 
-	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
-	// to ensure that exec-entrypoint and run can make use of them.
-	"k8s.io/client-go/dynamic"
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
 	"k8s.io/client-go/tools/clientcmd"
 
@@ -141,11 +138,6 @@ func main() {
 		setupLog.Error(err, "unable to get kubernetes clientset")
 		os.Exit(1)
 	}
-	dynamicClient, err := dynamic.NewForConfig(restConfig)
-	if err != nil {
-		setupLog.Error(err, "unable to get kubernetes dynamic client")
-		os.Exit(1)
-	}
 
 	if err = (&controller.ClusterReconciler{
 		Client: mgr.GetClient(),
@@ -210,11 +202,10 @@ func main() {
 	}
 
 	if err = (&controlplane.K0sController{
-		Client:        mgr.GetClient(),
-		Scheme:        mgr.GetScheme(),
-		ClientSet:     clientSet,
-		DynamicClient: dynamicClient,
-		RESTConfig:    restConfig,
+		Client:     mgr.GetClient(),
+		Scheme:     mgr.GetScheme(),
+		ClientSet:  clientSet,
+		RESTConfig: restConfig,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "K0sController")
 		os.Exit(1)
diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go
index 9da30f5d0..d0ae4462e 100644
--- a/internal/controller/controlplane/helper.go
+++ b/internal/controller/controlplane/helper.go
@@ -134,10 +134,21 @@ func (c *K0sController) createMachineFromTemplate(ctx context.Context, name stri
 		return nil, err
 	}
 
-	resourceType := strings.ToLower(machineFromTemplate.GetKind()) + "s"
+	// Resolve the plural resource name of the template's kind via API discovery.
+	pluralName := ""
+	resList, err := c.ClientSet.Discovery().ServerResourcesForGroupVersion(existingMachineFromTemplate.GetAPIVersion())
+	if err != nil {
+		return nil, fmt.Errorf("error getting server resources for %s: %w", existingMachineFromTemplate.GetAPIVersion(), err)
+	}
+	for _, apiRes := range resList.APIResources {
+		if apiRes.Kind == existingMachineFromTemplate.GetKind() && !strings.Contains(apiRes.Name, "/") {
+			pluralName = apiRes.Name
+			break
+		}
+	}
 	req := c.ClientSet.RESTClient().Patch(types.MergePatchType).
 		Body(data).
-		AbsPath("apis", machineFromTemplate.GetAPIVersion(), "namespaces", machineFromTemplate.GetNamespace(), resourceType, machineFromTemplate.GetName())
+		AbsPath("apis", machineFromTemplate.GetAPIVersion(), "namespaces", machineFromTemplate.GetNamespace(), pluralName, machineFromTemplate.GetName())
 	_, err = req.DoRaw(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("error patching: %w", err)
diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go
index c0e1d4851..db256752a 100644
--- a/internal/controller/controlplane/k0s_controlplane_controller.go
+++ b/internal/controller/controlplane/k0s_controlplane_controller.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"k8s.io/client-go/dynamic"
 	"strings"
 	"time"
 
@@ -62,10 +61,9 @@ var ErrNewMachinesNotReady = fmt.Errorf("waiting for new machines")
 
 type K0sController struct {
 	client.Client
-	Scheme        *runtime.Scheme
-	ClientSet     *kubernetes.Clientset
-	DynamicClient dynamic.Interface
-	RESTConfig    *rest.Config
+	Scheme     *runtime.Scheme
+	ClientSet  *kubernetes.Clientset
+	RESTConfig *rest.Config
 }
 
 // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=k0scontrolplanes/status,verbs=get;list;watch;create;update;patch;delete
diff --git a/inttest/capi-controlplane-docker-downscaling/capi_controlplane_docker_downscaling_test.go b/inttest/capi-controlplane-docker-downscaling/capi_controlplane_docker_downscaling_test.go
index ec6efb944..e5309afd5 100644
--- a/inttest/capi-controlplane-docker-downscaling/capi_controlplane_docker_downscaling_test.go
+++ b/inttest/capi-controlplane-docker-downscaling/capi_controlplane_docker_downscaling_test.go
@@ -95,8 +95,7 @@ func (s *CAPIControlPlaneDockerDownScalingSuite) TestCAPIControlPlaneDockerDownS
 	s.T().Log("cluster objects applied, waiting for cluster to be ready")
 
 	var localPort int
-	// nolint:staticcheck
-	err := wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+	err := wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
 		localPort, _ = getLBPort("docker-test-lb")
 		return localPort > 0, nil
 	})
@@ -106,8 +105,7 @@ func (s *CAPIControlPlaneDockerDownScalingSuite) TestCAPIControlPlaneDockerDownS
 	kmcKC, err := util.GetKMCClientSet(s.ctx, s.client, "docker-test", "default", localPort)
 	s.Require().NoError(err)
 
-	// nolint:staticcheck
-	err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
 		b, _ := s.client.RESTClient().
 			Get().
 			AbsPath("/healthz").
@@ -118,8 +116,7 @@ func (s *CAPIControlPlaneDockerDownScalingSuite) TestCAPIControlPlaneDockerDownS
 	s.Require().NoError(err)
 
 	for i := 0; i < 3; i++ {
-		// nolint:staticcheck
-		err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+		err = wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
 			nodeName := fmt.Sprintf("docker-test-%d", i)
 			output, err := exec.Command("docker", "exec", nodeName, "k0s", "status").Output()
 			if err != nil {
@@ -134,8 +131,7 @@ func (s *CAPIControlPlaneDockerDownScalingSuite) TestCAPIControlPlaneDockerDownS
 	s.T().Log("waiting for node to be ready")
 	s.Require().NoError(util.WaitForNodeReadyStatus(s.ctx, kmcKC, "docker-test-worker-0", corev1.ConditionTrue))
 
-	// nolint:staticcheck
-	err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
+	err = wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
 		result, err := kmcKC.RESTClient().Get().AbsPath("/apis/autopilot.k0sproject.io/v1beta2/controlnodes").DoRaw(ctx)
 		if err != nil {
 			return false, err
@@ -150,28 +146,17 @@ func (s *CAPIControlPlaneDockerDownScalingSuite) TestCAPIControlPlaneDockerDownS
 	s.Require().NoError(err)
 
 	s.T().Log("scaling down control plane")
-
-	out, err := exec.Command("docker", "exec", "docker-test-1", "k0s", "etcd", "leave").CombinedOutput()
-	s.T().Log(string(out))
-	s.Require().NoError(err)
-
-	out, err = exec.Command("docker", "exec", "docker-test-2", "k0s", "etcd", "leave").CombinedOutput()
-	s.T().Log(string(out))
-	s.Require().NoError(err)
-
 	s.updateClusterObjects()
-	// nolint:staticcheck
-	err = wait.PollImmediateUntilWithContext(s.ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
-		output, err := exec.Command("docker", "exec", "docker-test-0", "k0s", "etcd", "member-list").CombinedOutput()
+
+	err = wait.PollUntilContextCancel(s.ctx, 1*time.Second, true, func(ctx context.Context) (bool, error) {
+		ids, err := util.GetControlPlaneNodesIDs("docker-test")
 		if err != nil {
 			return false, nil
 		}
 
-		return !strings.Contains(string(output), "docker-test-1"), nil
+		return len(ids) == 1, nil
 	})
 	s.Require().NoError(err)
-
-	s.Require().NoError(util.WaitForNodeReadyStatus(s.ctx, kmcKC, "docker-test-worker-0", corev1.ConditionTrue))
 }
 
 func (s *CAPIControlPlaneDockerDownScalingSuite) applyClusterObjects() {
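
Note for reviewers: the discovery lookup the helper.go hunk introduces can be
exercised outside the controller with a standalone sketch along these lines.
This is not part of the patch; the kubeconfig location, group/version, and
kind below are illustrative assumptions, not values the controller uses:

package main

import (
	"fmt"
	"strings"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption for this sketch: load the default kubeconfig (~/.kube/config).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Placeholder group/version and kind; any GVK served by the cluster works.
	groupVersion := "cluster.x-k8s.io/v1beta1"
	kind := "MachineDeployment"

	// Same discovery call the patch makes in createMachineFromTemplate.
	resList, err := clientSet.Discovery().ServerResourcesForGroupVersion(groupVersion)
	if err != nil {
		panic(err)
	}
	for _, apiRes := range resList.APIResources {
		// Subresources are listed as "resource/subresource" and can share the
		// parent's kind (e.g. ".../status"), so filter them out by name.
		if apiRes.Kind == kind && !strings.Contains(apiRes.Name, "/") {
			fmt.Printf("plural resource name for %s: %s\n", kind, apiRes.Name)
			return
		}
	}
	fmt.Printf("kind %s not served under %s\n", kind, groupVersion)
}

The strings.Contains guard mirrors the patch: without it the loop could match
a status subresource entry, whose Kind equals the parent kind, and build a
patch URL like ".../machinedeployments/status/<name>".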