From bed89b12218da6c18c48a3bb7f69d5149d1257a1 Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Thu, 29 Aug 2019 10:56:01 +0300 Subject: [PATCH 1/4] Add a names package with all the global system names --- pkg/manager/manager.go | 3 ++- pkg/names/names.go | 15 +++++++++++++++ pkg/webhook/webhook.go | 23 ++++++++++------------- tests/tests.go | 4 ++-- 4 files changed, 29 insertions(+), 16 deletions(-) create mode 100644 pkg/names/names.go diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 54e6b280c..2eddead3a 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -33,6 +33,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/controller" + "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names" poolmanager "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/pool-manager" "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/webhook" ) @@ -196,7 +197,7 @@ func (k *KubeMacPoolManager) markPodAsLeader() error { return err } - pod.Labels[webhook.LeaderLabel] = "true" + pod.Labels[names.LEADER_LABEL] = "true" _, err = k.clientset.CoreV1().Pods(k.podNamespace).Update(pod) if err != nil { return err diff --git a/pkg/names/names.go b/pkg/names/names.go new file mode 100644 index 000000000..a60e5e9ec --- /dev/null +++ b/pkg/names/names.go @@ -0,0 +1,15 @@ +package names + +const MANAGER_NAMESPACE = "kubemacpool-system" + +const MANAGER_DEPLOYMENT = "kubemacpool-mac-controller-manager" + +const WEBHOOK_SERVICE = "kubemacpool-service" + +const MUTATE_WEBHOOK = "kubemacpool-webhook" + +const MUTATE_WEBHOOK_CONFIG = "kubemacpool" + +const LEADER_LABEL = "kubemacpool-leader" + +const ADMISSION_IGNORE_LABEL = "kubemacpool/ignoreAdmission" diff --git a/pkg/webhook/webhook.go b/pkg/webhook/webhook.go index 83eaa77e3..9e395caa7 100644 --- a/pkg/webhook/webhook.go +++ b/pkg/webhook/webhook.go @@ -28,13 +28,10 @@ import ( runtimewebhook "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names" "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/pool-manager" ) -const ( - LeaderLabel = "kubemacpool-leader" -) - // AddToManagerFuncs is a list of functions to add all Controllers to the Manager var AddToManagerFuncs []func(manager.Manager, *pool_manager.PoolManager, *metav1.LabelSelector) (*admission.Webhook, error) @@ -49,17 +46,17 @@ var AddToManagerFuncs []func(manager.Manager, *pool_manager.PoolManager, *metav1 // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;create;update;patch;list;watch // +kubebuilder:rbac:groups="kubevirt.io",resources=virtualmachines,verbs=get;list;watch;create;update;patch func AddToManager(mgr manager.Manager, poolManager *pool_manager.PoolManager) error { - svr, err := runtimewebhook.NewServer("kubemacpool-webhook", mgr, runtimewebhook.ServerOptions{ + svr, err := runtimewebhook.NewServer(names.MUTATE_WEBHOOK, mgr, runtimewebhook.ServerOptions{ CertDir: "/tmp/cert", Port: 8000, BootstrapOptions: &runtimewebhook.BootstrapOptions{ - MutatingWebhookConfigName: "kubemacpool", + MutatingWebhookConfigName: names.MUTATE_WEBHOOK_CONFIG, Service: &runtimewebhook.Service{ - Namespace: "kubemacpool-system", - Name: "kubemacpool-service", + Namespace: names.MANAGER_NAMESPACE, + Name: names.WEBHOOK_SERVICE, // Selectors should select the pods that runs this webhook server. 
Selectors: map[string]string{ - LeaderLabel: "true", + names.LEADER_LABEL: "true", }, }, }, @@ -68,7 +65,7 @@ func AddToManager(mgr manager.Manager, poolManager *pool_manager.PoolManager) er return err } - namespaceSelector := &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{Key: "kubemacpool/ignoreAdmission", + namespaceSelector := &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{{Key: names.ADMISSION_IGNORE_LABEL, Operator: metav1.LabelSelectorOpDoesNotExist}}} webhooks := []runtimewebhook.Webhook{} @@ -94,13 +91,13 @@ func AddToManager(mgr manager.Manager, poolManager *pool_manager.PoolManager) er // We choose this solution because the sigs.k8s.io/controller-runtime package doesn't allow to customize // the ServerOptions object func CreateOwnerRefForMutatingWebhook(kubeClient *kubernetes.Clientset) error { - managerDeployment, err := kubeClient.AppsV1().Deployments("kubemacpool-system").Get("kubemacpool-mac-controller-manager", metav1.GetOptions{}) + managerDeployment, err := kubeClient.AppsV1().Deployments(names.MANAGER_NAMESPACE).Get(names.MANAGER_DEPLOYMENT, metav1.GetOptions{}) if err != nil { return err } ownerRefList := []metav1.OwnerReference{{Name: managerDeployment.Name, Kind: "Deployment", APIVersion: "apps/v1", UID: managerDeployment.UID}} - mutatingWebHookObject, err := kubeClient.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Get("kubemacpool", metav1.GetOptions{}) + mutatingWebHookObject, err := kubeClient.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Get(names.MUTATE_WEBHOOK_CONFIG, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { mutatingWebHookObject = &admissionregistration.MutatingWebhookConfiguration{ @@ -109,7 +106,7 @@ func CreateOwnerRefForMutatingWebhook(kubeClient *kubernetes.Clientset) error { Kind: "MutatingWebhookConfiguration", }, ObjectMeta: metav1.ObjectMeta{ - Name: "kubemacpool", + Name: names.MUTATE_WEBHOOK_CONFIG, OwnerReferences: ownerRefList, }} _, err = kubeClient.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Create(mutatingWebHookObject) diff --git a/tests/tests.go b/tests/tests.go index c8f5f0e0a..c45546ab0 100644 --- a/tests/tests.go +++ b/tests/tests.go @@ -21,7 +21,7 @@ import ( kubevirtv1 "kubevirt.io/kubevirt/pkg/api/v1" kubevirtutils "kubevirt.io/kubevirt/tools/vms-generator/utils" - "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/webhook" + "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names" ) const ( @@ -164,7 +164,7 @@ func DeleteLeaderManager() { leaderPodName := "" for _, pod := range pods.Items { - if _, ok := pod.Labels[webhook.LeaderLabel]; ok { + if _, ok := pod.Labels[names.LEADER_LABEL]; ok { leaderPodName = pod.Name break } From d1048bacb5dd15a30c245701f3ec1af368a49990 Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Thu, 29 Aug 2019 10:57:42 +0300 Subject: [PATCH 2/4] Fix the test suite to fail after printing all the logs. This commit also extends the test timeouts (20 minutes for the local functest run, 30 minutes on CI). --- .travis.yml | 2 +- hack/functest.sh | 2 +- tests/tests_suite_test.go | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index eb969a54b..0a86772ba 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,7 +13,7 @@ script: - if [[ -n "$(git status --porcelain)" ]] ; then echo "It seems like you need to run make.
Please run it and commit the changes"; git status --porcelain; false; fi - make docker-test - make deploy-test-cluster - - KUBECONFIG="`pwd`/cluster/dind-cluster/config" go test -v -race ./tests/... + - KUBECONFIG="`pwd`/cluster/dind-cluster/config" go test -timeout 30m -v -race ./tests/... deploy: - provider: script diff --git a/hack/functest.sh b/hack/functest.sh index b853d6453..006de766f 100755 --- a/hack/functest.sh +++ b/hack/functest.sh @@ -1,4 +1,4 @@ #!/usr/bin/env bash source hack/common.sh -KUBECONFIG=${MACPOOL_DIR}/cluster/$MACPOOL_PROVIDER/.kubeconfig go test -v -race ./tests/... +KUBECONFIG=${MACPOOL_DIR}/cluster/$MACPOOL_PROVIDER/.kubeconfig go test -timeout 20m -v -race ./tests/... diff --git a/tests/tests_suite_test.go b/tests/tests_suite_test.go index 74a99c195..1c46c04ef 100644 --- a/tests/tests_suite_test.go +++ b/tests/tests_suite_test.go @@ -48,4 +48,6 @@ func KubemacPoolFailedFunction(message string, callerSkip ...int) { fmt.Printf("Pod Name: %s \n", pod.Name) fmt.Println(string(output)) } + + Fail(message, callerSkip...) } From 719dc552df6af51bb79cdf1a0e4f428c7da7998d Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Thu, 29 Aug 2019 12:28:45 +0300 Subject: [PATCH 3/4] Change unit tests to use the CreatePoolManager function --- pkg/pool-manager/pool_test.go | 87 +++++++++-------------------------- 1 file changed, 21 insertions(+), 66 deletions(-) diff --git a/pkg/pool-manager/pool_test.go b/pkg/pool-manager/pool_test.go index c312e5c04..8cabbe50b 100644 --- a/pkg/pool-manager/pool_test.go +++ b/pkg/pool-manager/pool_test.go @@ -29,12 +29,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" kubevirt "kubevirt.io/kubevirt/pkg/api/v1" + + "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names" ) var _ = Describe("Pool", func() { beforeAllocationAnnotation := map[string]string{networksAnnotation: `[{ "name": "ovs-conf"}]`} afterAllocationAnnotation := map[string]string{networksAnnotation: `[{"name":"ovs-conf","namespace":"default","mac":"02:00:00:00:00:00"}]`} samplePod := v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podpod", Namespace: "default", Annotations: afterAllocationAnnotation}} + vmConfigMap := v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: names.MANAGER_NAMESPACE, Name: vmWaitConfigMapName}} createPoolManager := func(startMacAddr, endMacAddr string, fakeObjectsForClient ...runtime.Object) *PoolManager { fakeClient := fake.NewSimpleClientset(fakeObjectsForClient...) 
@@ -100,13 +103,7 @@ var _ = Describe("Pool", func() { Describe("Pool Manager General Functions ", func() { It("should create a pool manager", func() { - fakeClient := fake.NewSimpleClientset() - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:FF:FF:FF:FF:FF") - Expect(err).ToNot(HaveOccurred()) - _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + createPoolManager("02:00:00:00:00:00", "02:FF:FF:FF:FF:FF") }) It("should fail to create pool manager when rangeStart is greater than rangeEnd", func() { @@ -196,17 +193,11 @@ var _ = Describe("Pool", func() { Networks: []kubevirt.Network{podNetwork, multusNetwork}}}}} It("should allocate a new mac and release it for masquerade", func() { - fakeClient := fake.NewSimpleClientset(&samplePod) - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &samplePod, &vmConfigMap) newVM := masqueradeVM newVM.Name = "newVM" - err = poolManager.AllocateVirtualMachineMac(&newVM) + err := poolManager.AllocateVirtualMachineMac(&newVM) Expect(err).ToNot(HaveOccurred()) Expect(len(poolManager.macPoolMap)).To(Equal(2)) @@ -226,32 +217,20 @@ var _ = Describe("Pool", func() { Expect(exist).To(BeFalse()) }) It("should not allocate a new mac for bridge interface on pod network", func() { - fakeClient := fake.NewSimpleClientset() - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := sampleVM newVM.Name = "newVM" - err = poolManager.AllocateVirtualMachineMac(&newVM) + err := poolManager.AllocateVirtualMachineMac(&newVM) Expect(err).ToNot(HaveOccurred()) Expect(len(poolManager.macPoolMap)).To(Equal(0)) }) It("should allocate a new mac and release it for multiple interfaces", func() { - fakeClient := fake.NewSimpleClientset(&samplePod) - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &samplePod, &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" - err = poolManager.AllocateVirtualMachineMac(newVM) + err := poolManager.AllocateVirtualMachineMac(newVM) Expect(err).ToNot(HaveOccurred()) Expect(len(poolManager.macPoolMap)).To(Equal(3)) @@ -277,16 +256,10 @@ var _ = Describe("Pool", func() { }) Describe("Update vm object", func() { It("should preserve mac addresses on update", func() { - fakeClient := fake.NewSimpleClientset() - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := 
net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" - err = poolManager.AllocateVirtualMachineMac(newVM) + err := poolManager.AllocateVirtualMachineMac(newVM) Expect(err).ToNot(HaveOccurred()) Expect(newVM.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress).To(Equal("02:00:00:00:00:00")) Expect(newVM.Spec.Template.Spec.Domain.Devices.Interfaces[1].MacAddress).To(Equal("02:00:00:00:00:01")) @@ -298,17 +271,11 @@ var _ = Describe("Pool", func() { Expect(updateVm.Spec.Template.Spec.Domain.Devices.Interfaces[1].MacAddress).To(Equal("02:00:00:00:00:01")) }) It("should preserve mac addresses and allocate a requested one on update", func() { - fakeClient := fake.NewSimpleClientset() - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" - err = poolManager.AllocateVirtualMachineMac(newVM) + err := poolManager.AllocateVirtualMachineMac(newVM) Expect(err).ToNot(HaveOccurred()) Expect(newVM.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress).To(Equal("02:00:00:00:00:00")) Expect(newVM.Spec.Template.Spec.Domain.Devices.Interfaces[1].MacAddress).To(Equal("02:00:00:00:00:01")) @@ -325,7 +292,7 @@ var _ = Describe("Pool", func() { Expect(exist).To(BeFalse()) }) It("should allow to add a new interface on update", func() { - poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02") + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" @@ -356,7 +323,7 @@ var _ = Describe("Pool", func() { Expect(exist).To(BeTrue()) }) It("should allow to remove an interface on update", func() { - poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02") + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" newVM.Spec.Template.Spec.Domain.Devices.Interfaces = append(newVM.Spec.Template.Spec.Domain.Devices.Interfaces, anotherMultusBridgeInterface) @@ -380,7 +347,7 @@ var _ = Describe("Pool", func() { Expect(exist).To(BeFalse()) }) It("should allow to remove and add an interface on update", func() { - poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02") + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newVM := multipleInterfacesVM.DeepCopy() newVM.Name = "newVM" @@ -408,18 +375,12 @@ var _ = Describe("Pool", func() { Describe("Pool Manager Functions For pod", func() { It("should allocate a new mac and release it", func() { - fakeClient := fake.NewSimpleClientset(&samplePod) - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, 
startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &samplePod, &vmConfigMap) newPod := samplePod newPod.Name = "newPod" newPod.Annotations = beforeAllocationAnnotation - err = poolManager.AllocatePodMac(&newPod) + err := poolManager.AllocatePodMac(&newPod) Expect(err).ToNot(HaveOccurred()) Expect(len(poolManager.macPoolMap)).To(Equal(2)) @@ -443,17 +404,11 @@ var _ = Describe("Pool", func() { Expect(exist).To(BeFalse()) }) It("should allocate requested mac when empty", func() { - fakeClient := fake.NewSimpleClientset() - startPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") - Expect(err).ToNot(HaveOccurred()) - endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:02") - Expect(err).ToNot(HaveOccurred()) - poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) - Expect(err).ToNot(HaveOccurred()) + poolManager := createPoolManager("02:00:00:00:00:00", "02:00:00:00:00:02", &vmConfigMap) newPod := samplePod newPod.Name = "newPod" - err = poolManager.AllocatePodMac(&newPod) + err := poolManager.AllocatePodMac(&newPod) Expect(err).ToNot(HaveOccurred()) Expect(newPod.Annotations[networksAnnotation]).To(Equal(afterAllocationAnnotation[networksAnnotation])) }) From 6f2a41c0c7d918045a6a5f906499c293b26aafa0 Mon Sep 17 00:00:00 2001 From: Sebastian Sch Date: Thu, 29 Aug 2019 12:30:37 +0300 Subject: [PATCH 4/4] Release MAC allocations when VM creation fails. Before this PR, if VM creation failed because of a validation error from the virt-api, the allocation was never cleaned up. This PR introduces a cleanup loop into the system. When the create-VM mutating webhook is hit, we perform the regular allocation but also record the MAC address in a waiting configmap. A cleanup loop then checks whether the entry has been removed from that map; if it is still there after the configured waiting time, we assume the object was never saved into etcd (no controller event) and we release the allocation. If we do get an event from the controller, we remove the MAC address from the configmap.
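
In outline, the mechanism described above works roughly like the sketch below. This is an illustrative, simplified example rather than the code added by this patch: the real implementation stores the timestamps in the kubemacpool-vm-configmap ConfigMap and runs inside the PoolManager, and the names used here (waitingPool, allocate, markReady, cleanupLoop, release) are invented for the example.

package main

import (
	"fmt"
	"sync"
	"time"
)

type waitingPool struct {
	mu      sync.Mutex
	waiting map[string]time.Time // MAC address -> time the webhook handed it out
}

// allocate is called from the mutating webhook: the MAC is handed out,
// but stays "pending" until the controller sees the VM object.
func (p *waitingPool) allocate(mac string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.waiting[mac] = time.Now()
}

// markReady is called from the controller event handler once the VM
// actually reached etcd; the MAC is no longer pending.
func (p *waitingPool) markReady(mac string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.waiting, mac)
}

// cleanupLoop releases MACs that stayed pending longer than waitTime,
// i.e. the VM create was rejected by another webhook and never stored.
func (p *waitingPool) cleanupLoop(waitTime time.Duration, release func(mac string)) {
	for range time.Tick(3 * time.Second) {
		p.mu.Lock()
		for mac, allocatedAt := range p.waiting {
			if time.Since(allocatedAt) > waitTime {
				delete(p.waiting, mac)
				release(mac)
			}
		}
		p.mu.Unlock()
	}
}

func main() {
	pool := &waitingPool{waiting: map[string]time.Time{}}
	go pool.cleanupLoop(2*time.Second, func(mac string) { fmt.Println("released", mac) })

	pool.allocate("02:00:00:00:00:01") // never marked ready -> released by the loop
	pool.allocate("02:00:00:00:00:02")
	pool.markReady("02:00:00:00:00:02") // controller saw the VM -> stays allocated

	time.Sleep(7 * time.Second) // give the cleanup loop time to run
}
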
--- cmd/manager/main.go | 4 +- config/default/manager/manager.yaml | 1 + config/release/kubemacpool.yaml | 1 + config/test/kubemacpool.yaml | 1 + config/test/manager_image_patch.yaml | 1 + .../virtualmachine_controller.go | 3 + pkg/manager/manager.go | 20 +-- pkg/pool-manager/pool.go | 5 +- pkg/pool-manager/pool_test.go | 8 +- pkg/pool-manager/virtualmachine_pool.go | 151 +++++++++++++++++- tests/virtual_machines_test.go | 6 +- 11 files changed, 176 insertions(+), 25 deletions(-) diff --git a/cmd/manager/main.go b/cmd/manager/main.go index e18beb3c4..19904bbd8 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -43,9 +43,11 @@ func loadMacAddressFromEnvVar(envName string) (net.HardwareAddr, error) { func main() { var logType, metricsAddr string + var waitingTime int flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") flag.StringVar(&logType, "v", "production", "Log type (debug/production).") + flag.IntVar(&waitingTime, "wait-time", 600, "waiting time to release the mac if object was not created") flag.Parse() if logType == "debug" { @@ -80,7 +82,7 @@ func main() { os.Exit(1) } - kubemacpoolManager := manager.NewKubeMacPoolManager(podNamespace, podName, metricsAddr) + kubemacpoolManager := manager.NewKubeMacPoolManager(podNamespace, podName, metricsAddr, waitingTime) err = kubemacpoolManager.Run(rangeStart, rangeEnd) if err != nil { diff --git a/config/default/manager/manager.yaml b/config/default/manager/manager.yaml index 7dfde67a6..5fa0fd867 100644 --- a/config/default/manager/manager.yaml +++ b/config/default/manager/manager.yaml @@ -57,6 +57,7 @@ spec: - /manager args: - "--v=production" + - "--wait-time=600" image: quay.io/kubevirt/kubemacpool:latest imagePullPolicy: Always name: manager diff --git a/config/release/kubemacpool.yaml b/config/release/kubemacpool.yaml index 6dc4edec4..14a218aeb 100644 --- a/config/release/kubemacpool.yaml +++ b/config/release/kubemacpool.yaml @@ -172,6 +172,7 @@ spec: containers: - args: - --v=production + - --wait-time=600 command: - /manager env: diff --git a/config/test/kubemacpool.yaml b/config/test/kubemacpool.yaml index 77b8db04c..9b62f2d50 100644 --- a/config/test/kubemacpool.yaml +++ b/config/test/kubemacpool.yaml @@ -172,6 +172,7 @@ spec: containers: - args: - --v=debug + - --wait-time=10 command: - /manager env: diff --git a/config/test/manager_image_patch.yaml b/config/test/manager_image_patch.yaml index 7a60cc0d5..90b1fdd4a 100644 --- a/config/test/manager_image_patch.yaml +++ b/config/test/manager_image_patch.yaml @@ -10,3 +10,4 @@ spec: name: manager args: - "--v=debug" + - "--wait-time=10" diff --git a/pkg/controller/virtualmachine/virtualmachine_controller.go b/pkg/controller/virtualmachine/virtualmachine_controller.go index e3a44a817..c6ef6d7c8 100644 --- a/pkg/controller/virtualmachine/virtualmachine_controller.go +++ b/pkg/controller/virtualmachine/virtualmachine_controller.go @@ -113,6 +113,7 @@ func (r *ReconcilePolicy) addFinalizerAndUpdate(virtualMachine *kubevirt.Virtual if helper.ContainsString(virtualMachine.ObjectMeta.Finalizers, pool_manager.RuntimeObjectFinalizerName) { return nil } + log.V(1).Info("The VM does not have a finalizer", "virtualMachineName", request.Name, "virtualMachineNamespace", request.Namespace) @@ -130,6 +131,8 @@ func (r *ReconcilePolicy) addFinalizerAndUpdate(virtualMachine *kubevirt.Virtual "virtualMachineName", request.Name, "virtualMachineNamespace", request.Namespace) + r.poolManager.MarkVMAsReady(virtualMachine) + return nil } diff --git 
a/pkg/manager/manager.go b/pkg/manager/manager.go index 2eddead3a..073f63123 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -45,15 +45,16 @@ type KubeMacPoolManager struct { config *rest.Config metricsAddr string continueToRunManager bool - restartChannel chan struct{} - kubevirtInstalledChannel chan struct{} - stopSignalChannel chan os.Signal - podNamespace string - podName string - resourceLock resourcelock.Interface + restartChannel chan struct{} // Close the channel if we need to regenerate certs + kubevirtInstalledChannel chan struct{} // This channel is closed after we find kubevirt, to reload the manager + stopSignalChannel chan os.Signal // stop channel signal + podNamespace string // manager pod namespace + podName string // manager pod name + waitingTime int // Duration in seconds to lock a mac address before it is saved to etcd + resourceLock resourcelock.Interface // Used for the leader election } -func NewKubeMacPoolManager(podNamespace, podName, metricsAddr string) *KubeMacPoolManager { +func NewKubeMacPoolManager(podNamespace, podName, metricsAddr string, waitingTime int) *KubeMacPoolManager { kubemacpoolManager := &KubeMacPoolManager{ continueToRunManager: true, restartChannel: make(chan struct{}), @@ -61,7 +62,8 @@ func NewKubeMacPoolManager(podNamespace, podName, metricsAddr string) *KubeMacPo stopSignalChannel: make(chan os.Signal, 1), podNamespace: podNamespace, podName: podName, - metricsAddr: metricsAddr} + metricsAddr: metricsAddr, + waitingTime: waitingTime} signal.Notify(kubemacpoolManager.stopSignalChannel, os.Interrupt, os.Kill) @@ -120,7 +122,7 @@ func (k *KubeMacPoolManager) Run(rangeStart, rangeEnd net.HardwareAddr) error { } isKubevirtInstalled := checkForKubevirt(k.clientset) - poolManager, err := poolmanager.NewPoolManager(k.clientset, rangeStart, rangeEnd, isKubevirtInstalled) + poolManager, err := poolmanager.NewPoolManager(k.clientset, rangeStart, rangeEnd, isKubevirtInstalled, k.waitingTime) if err != nil { return fmt.Errorf("unable to create pool manager error %v", err) } diff --git a/pkg/pool-manager/pool.go b/pkg/pool-manager/pool.go index a57f3a595..09c7775d1 100644 --- a/pkg/pool-manager/pool.go +++ b/pkg/pool-manager/pool.go @@ -31,6 +31,7 @@ const ( RuntimeObjectFinalizerName = "k8s.v1.cni.cncf.io/kubeMacPool" networksAnnotation = "k8s.v1.cni.cncf.io/networks" networksStatusAnnotation = "k8s.v1.cni.cncf.io/networks-status" + vmWaitConfigMapName = "kubemacpool-vm-configmap" ) var log = logf.Log.WithName("PoolManager") @@ -54,7 +55,7 @@ const ( AllocationStatusWaitingForPod AllocationStatus = "WaitingForPod" ) -func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.HardwareAddr, kubevirtExist bool) (*PoolManager, error) { +func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.HardwareAddr, kubevirtExist bool, waitTime int) (*PoolManager, error) { err := checkRange(rangeStart, rangeEnd) if err != nil { return nil, err @@ -86,6 +87,8 @@ func NewPoolManager(kubeClient kubernetes.Interface, rangeStart, rangeEnd net.Ha return nil, err } + go poolManger.vmWaitingCleanupLoop(waitTime) + return poolManger, nil } diff --git a/pkg/pool-manager/pool_test.go b/pkg/pool-manager/pool_test.go index 8cabbe50b..f32efe0ca 100644 --- a/pkg/pool-manager/pool_test.go +++ b/pkg/pool-manager/pool_test.go @@ -45,7 +45,7 @@ var _ = Describe("Pool", func() { Expect(err).ToNot(HaveOccurred()) endPoolRangeEnv, err := net.ParseMAC(endMacAddr) Expect(err).ToNot(HaveOccurred()) - poolManager, err :=
NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) + poolManager, err := NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false, 10) Expect(err).ToNot(HaveOccurred()) return poolManager @@ -112,7 +112,7 @@ var _ = Describe("Pool", func() { Expect(err).ToNot(HaveOccurred()) endPoolRangeEnv, err := net.ParseMAC("02:00:00:00:00:00") Expect(err).ToNot(HaveOccurred()) - _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) + _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false, 10) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(Equal("Invalid range. rangeStart: 0a:00:00:00:00:00 rangeEnd: 02:00:00:00:00:00")) @@ -124,7 +124,7 @@ var _ = Describe("Pool", func() { Expect(err).ToNot(HaveOccurred()) endPoolRangeEnv, err := net.ParseMAC("06:00:00:00:00:00") Expect(err).ToNot(HaveOccurred()) - _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) + _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false, 10) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(Equal("RangeStart is invalid: invalid mac address. Multicast addressing is not supported. Unicast addressing must be used. The first octet is 0X3")) @@ -136,7 +136,7 @@ var _ = Describe("Pool", func() { Expect(err).ToNot(HaveOccurred()) endPoolRangeEnv, err := net.ParseMAC("05:00:00:00:00:00") Expect(err).ToNot(HaveOccurred()) - _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false) + _, err = NewPoolManager(fakeClient, startPoolRangeEnv, endPoolRangeEnv, false, 10) Expect(err).To(HaveOccurred()) Expect(err.Error()).To(Equal("RangeEnd is invalid: invalid mac address. Multicast addressing is not supported. Unicast addressing must be used. The first octet is 0X5")) }) diff --git a/pkg/pool-manager/virtualmachine_pool.go b/pkg/pool-manager/virtualmachine_pool.go index f799bae81..1ed8aa518 100644 --- a/pkg/pool-manager/virtualmachine_pool.go +++ b/pkg/pool-manager/virtualmachine_pool.go @@ -19,11 +19,15 @@ package pool_manager import ( "fmt" "net" - - "k8s.io/apimachinery/pkg/api/errors" + "strings" + "time" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubevirt "kubevirt.io/kubevirt/pkg/api/v1" + + "github.com/K8sNetworkPlumbingWG/kubemacpool/pkg/names" ) func (p *PoolManager) AllocateVirtualMachineMac(virtualMachine *kubevirt.VirtualMachine) error { @@ -78,10 +82,17 @@ func (p *PoolManager) AllocateVirtualMachineMac(virtualMachine *kubevirt.Virtual return err } copyVM.Spec.Template.Spec.Domain.Devices.Interfaces[idx].MacAddress = macAddr - allocations[iface.Name] = iface.MacAddress + allocations[iface.Name] = macAddr } } + + err := p.AddMacToWaitingConfig(allocations) + if err != nil { + return err + } + virtualMachine.Spec.Template.Spec.Domain.Devices.Interfaces = copyVM.Spec.Template.Spec.Domain.Devices.Interfaces + return nil } @@ -198,7 +209,7 @@ func (p *PoolManager) allocateFromPoolForVirtualMachine(virtualMachine *kubevirt return "", err } - p.macPoolMap[macAddr.String()] = AllocationStatusAllocated + p.macPoolMap[macAddr.String()] = AllocationStatusWaitingForPod log.Info("mac from pool was allocated for virtual machine", "allocatedMac", macAddr.String(), "virtualMachineName", virtualMachine.Name, @@ -219,7 +230,7 @@ func (p *PoolManager) allocateRequestedVirtualMachineInterfaceMac(virtualMachine return err } - p.macPoolMap[requestedMac] = AllocationStatusAllocated + p.macPoolMap[requestedMac] = 
AllocationStatusWaitingForPod log.Info("requested mac was allocated for virtual machine", "requestedMap", requestedMac, "virtualMachineName", virtualMachine.Name, @@ -232,13 +243,24 @@ func (p *PoolManager) initVirtualMachineMap() error { if !p.isKubevirt { return nil } + + waitingMac, err := p.getOrCreateVmMacWaitMap() + if err != nil { + return err + } + + for macAddress := range waitingMac { + macAddress = strings.Replace(macAddress, "-", ":", 5) + p.macPoolMap[macAddress] = AllocationStatusWaitingForPod + } + result := p.kubeClient.ExtensionsV1beta1().RESTClient().Get().RequestURI("apis/kubevirt.io/v1alpha3/virtualmachines").Do() if result.Error() != nil { return result.Error() } vms := &kubevirt.VirtualMachineList{} - err := result.Into(vms) + err = result.Into(vms) if err != nil { return err } @@ -333,6 +355,123 @@ func (p *PoolManager) revertAllocationOnVm(vmName string, allocations map[string p.releaseMacAddressesFromInterfaceMap(allocations) }
+// This function returns or creates a config map that contains the mac addresses and their allocation times.
+func (p *PoolManager) getOrCreateVmMacWaitMap() (map[string]string, error) {
+	configMap, err := p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Get(vmWaitConfigMapName, metav1.GetOptions{})
+	if err != nil {
+		if errors.IsNotFound(err) {
+			_, err = p.kubeClient.CoreV1().
+				ConfigMaps(names.MANAGER_NAMESPACE).
+				Create(&corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: vmWaitConfigMapName,
+					Namespace: names.MANAGER_NAMESPACE}})
+
+			return map[string]string{}, nil
+		}
+
+		return nil, err
+	}
+
+	return configMap.Data, nil
+}
+
+// Add all the allocated mac addresses to the waiting config map with the current time.
+func (p *PoolManager) AddMacToWaitingConfig(allocations map[string]string) error {
+	configMap, err := p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Get(vmWaitConfigMapName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if configMap.Data == nil {
+		configMap.Data = map[string]string{}
+	}
+
+	for _, macAddress := range allocations {
+		log.V(1).Info("add mac address to waiting config", "macAddress", macAddress)
+		macAddress = strings.Replace(macAddress, ":", "-", 5)
+		configMap.Data[macAddress] = time.Now().Format(time.RFC3339)
+	}
+
+	_, err = p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Update(configMap)
+	return err
+}
+
+// Remove all the mac addresses from the waiting configmap; this means the vm was saved in etcd and passed all validations
+func (p *PoolManager) MarkVMAsReady(vm *kubevirt.VirtualMachine) error {
+	p.poolMutex.Lock()
+	defer p.poolMutex.Unlock()
+
+	configMap, err := p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Get(vmWaitConfigMapName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	if configMap.Data == nil {
+		log.Info("the configMap is empty")
+		return nil
+	}
+
+	for _, vmInterface := range vm.Spec.Template.Spec.Domain.Devices.Interfaces {
+		if vmInterface.MacAddress != "" {
+			p.macPoolMap[vmInterface.MacAddress] = AllocationStatusAllocated
+			macAddress := strings.Replace(vmInterface.MacAddress, ":", "-", 5)
+			delete(configMap.Data, macAddress)
+		}
+	}
+
+	_, err = p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Update(configMap)
+	log.V(1).Info("marked virtual machine as ready", "virtualMachineNamespace", vm.Namespace,
+		"virtualMachineName", vm.Name)
+	return err
+}
+
+// This function checks for virtual machines that hit the create
+// mutating webhook but for which we never received the creation event in the controller loop.
+// This means the create request was rejected by some other mutating or validating webhook,
+// so we release the virtual machine's mac addresses
+func (p *PoolManager) vmWaitingCleanupLoop(waitTime int) {
+	c := time.Tick(3 * time.Second)
+	log.Info("starting cleanup loop for waiting mac addresses")
+	for range c {
+		p.poolMutex.Lock()
+
+		configMap, err := p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Get(vmWaitConfigMapName, metav1.GetOptions{})
+		if err != nil {
+			log.Error(err, "failed to get config map", "configMapName", vmWaitConfigMapName)
+			p.poolMutex.Unlock()
+			continue
+		}
+
+		if configMap.Data == nil {
+			log.Info("the configMap is empty", "configMapName", vmWaitConfigMapName)
+			p.poolMutex.Unlock()
+			continue
+		}
+
+		for macAddress, allocationTime := range configMap.Data {
+			t, err := time.Parse(time.RFC3339, allocationTime)
+			if err != nil {
+				// TODO: remove the mac from the wait map??
+				log.Error(err, "failed to parse allocation time")
+				continue
+			}
+
+			if time.Now().After(t.Add(time.Duration(waitTime) * time.Second)) {
+				delete(configMap.Data, macAddress)
+				macAddress = strings.Replace(macAddress, "-", ":", 5)
+				delete(p.macPoolMap, macAddress)
+				log.V(1).Info("released mac address in waiting loop", "macAddress", macAddress)
+			}
+		}
+
+		_, err = p.kubeClient.CoreV1().ConfigMaps(names.MANAGER_NAMESPACE).Update(configMap)
+		if err != nil {
+			log.Error(err, "failed to update config map", "configMapName", vmWaitConfigMapName)
+		}
+
+		p.poolMutex.Unlock()
+	}
+}
+
 func vmNamespaced(machine *kubevirt.VirtualMachine) string { return fmt.Sprintf("%s/%s", machine.Namespace, machine.Name) } diff --git a/tests/virtual_machines_test.go b/tests/virtual_machines_test.go index 88b798533..ed1f61c4e 100644 --- a/tests/virtual_machines_test.go +++ b/tests/virtual_machines_test.go @@ -370,10 +370,8 @@ var _ = Describe("Virtual Machines", func() { } }) }) - //TODO: remove the the pending annotation -"P"- from "PContext" when issue #44 is fixed : - //https://github.com/K8sNetworkPlumbingWG/kubemacpool/issues/44 //2633 - PContext("When we re-apply a failed VM yaml", func() { + Context("When we re-apply a failed VM yaml", func() { It("should allow to assign to the VM the same MAC addresses, with name as requested before and do not return an error", func() { err := setRange(rangeStart, rangeEnd) Expect(err).ToNot(HaveOccurred()) @@ -485,7 +483,7 @@ var _ = Describe("Virtual Machines", func() { Eventually(func() error { return testClient.VirtClient.Create(context.TODO(), anotherVm) - }, 40*time.Second, 5*time.Second).Should(Not(HaveOccurred()), "failed to apply the new vm object") + }, timeout, pollingInterval).Should(Not(HaveOccurred()), "failed to apply the new vm object") _, err = net.ParseMAC(anotherVm.Spec.Template.Spec.Domain.Devices.Interfaces[0].MacAddress) Expect(err).ToNot(HaveOccurred()) })