diff --git a/test/e2e/persistentips-primary_test.go b/test/e2e/persistentips-primary_test.go
new file mode 100644
index 0000000..06e23c5
--- /dev/null
+++ b/test/e2e/persistentips-primary_test.go
@@ -0,0 +1,396 @@
+/*
+ * This file is part of the KubeVirt project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2024 Red Hat, Inc.
+ *
+ */
+
+package e2e
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
+
+	kubevirtv1 "kubevirt.io/api/core/v1"
+
+	testenv "github.com/kubevirt/ipam-extensions/test/env"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var _ = Describe("Persistent IPs on Primary UDN interface", func() {
+	var failureCount int = 0
+	JustAfterEach(func() {
+		if CurrentSpecReport().Failed() {
+			failureCount++
+			By(fmt.Sprintf("Test failed, collecting logs and artifacts, failure count %d", failureCount))
+
+			logCommand([]string{"get", "pods", "-A"}, "pods", failureCount)
+			logCommand([]string{"get", "vm", "-A", "-oyaml"}, "vms", failureCount)
+			logCommand([]string{"get", "vmi", "-A", "-oyaml"}, "vmis", failureCount)
+			logCommand([]string{"get", "ipamclaims", "-A", "-oyaml"}, "ipamclaims", failureCount)
+			logOvnPods(failureCount)
+		}
+	})
+
+	When("network attachment definition created with allowPersistentIPs=true", func() {
+		var (
+			td                   testenv.TestData
+			networkInterfaceName = "ovn-udn1"
+			vm                   *kubevirtv1.VirtualMachine
+			vmi                  *kubevirtv1.VirtualMachineInstance
+			nad                  *nadv1.NetworkAttachmentDefinition
+		)
+		BeforeEach(func() {
+			td = testenv.GenerateTestData()
+			td.SetUp()
+			DeferCleanup(func() {
+				td.TearDown()
+			})
+
+			nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace, "primary")
+			vmi = testenv.GenerateAlpineWithPrimaryUDNVMI(td.Namespace)
+			vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning())
+
+			By("Create NetworkAttachmentDefinition")
+			Expect(testenv.Client.Create(context.Background(), nad)).To(Succeed())
+		})
+		Context("and a virtual machine using it is also created", func() {
+			var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus
+			var err error
+			BeforeEach(func() {
+				By("Creating VM with primary UDN")
+				Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed())
+
+				By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name))
+				Eventually(testenv.ThisVMReadiness(vm)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(BeTrue())
+
+				By("Wait for IPAMClaim to get created")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
+				Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty())
+			})
+
+			It("should keep ips after live migration", func() {
+				vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs
+
+				testenv.LiveMigrateVirtualMachine(td.Namespace, vm.Name)
+				testenv.CheckLiveMigrationSucceeded(td.Namespace, vm.Name)
+
+				By("Wait for VMI to be ready after live migration")
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(testenv.ContainConditionVMIReady())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
+				Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
+			})
+
+			It("should garbage collect IPAMClaims after VM deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VM foreground deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			When("the VM is stopped", func() {
+				BeforeEach(func() {
+					By("Invoking virtctl stop")
+					output, err := exec.Command("virtctl", "stop", "-n", td.Namespace, vmi.Name).CombinedOutput()
+					Expect(err).NotTo(HaveOccurred(), string(output))
+
+					By("Ensuring VM is not running")
+					Eventually(testenv.ThisVMI(vmi), 360*time.Second, 1*time.Second).Should(
+						SatisfyAll(
+							Not(testenv.BeCreated()),
+							Not(testenv.BeReady()),
+						))
+
+					Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						ShouldNot(BeEmpty())
+				})
+
+				It("should garbage collect IPAMClaims after VM is deleted", func() {
+					By("Delete VM and check ipam claims are gone")
+					Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
+					Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						Should(BeEmpty())
+				})
+
+				It("should garbage collect IPAMClaims after VM is foreground deleted", func() {
+					By("Foreground delete VM and check ipam claims are gone")
+					Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+					Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						Should(BeEmpty())
+				})
+			})
+
+			It("should keep ips after restart", func() {
+				defaultNetworkStatusBeforeRestart, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				vmiIPsBeforeRestart := defaultNetworkStatusBeforeRestart.IPs
+				vmiUUIDBeforeRestart := vmi.UID
+
+				By("Re-starting the VM")
+				output, err := exec.Command("virtctl", "restart", "-n", td.Namespace, vmi.Name).CombinedOutput()
+				Expect(err).NotTo(HaveOccurred(), string(output))
+
+				By("Wait for a new VMI to be re-started")
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(90 * time.Second).
+					Should(testenv.BeRestarted(vmiUUIDBeforeRestart))
+
+				By("Wait for VMI to be ready after restart")
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(testenv.ContainConditionVMIReady())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				defaultNetworkStatusAfterRestart, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(defaultNetworkStatusAfterRestart.Interface).To(Equal(defaultNetworkStatusBeforeRestart.Interface))
+				Expect(defaultNetworkStatusAfterRestart.IPs).To(ConsistOf(vmiIPsBeforeRestart))
+			})
+		})
+
+		When("requested for a VM whose VMI has extra finalizers", func() {
+			const testFinalizer = "testFinalizer"
+
+			BeforeEach(func() {
+				By("Adding VMI custom finalizer to control VMI deletion")
+				vm.Spec.Template.ObjectMeta.Finalizers = []string{testFinalizer}
+
+				By("Creating VM with primary UDN")
+				Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed())
+
+				By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name))
+				Eventually(testenv.ThisVMReadiness(vm)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(BeTrue())
+
+				By("Wait for IPAMClaim to get created")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				vmiDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(vmiDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
+				Expect(vmiDefaultNetworkStatus.IPs).ToNot(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() {
+				By("Foreground delete the VM, and validate the IPAMClaim isn't deleted since the VMI still exists")
+				Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+				Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				By("Remove the custom finalizer (all the others were already removed at this stage)")
+				patchData, err := removeFinalizersPatch()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(testenv.Client.Patch(context.TODO(), vmi, client.RawPatch(types.MergePatchType, patchData))).To(Succeed())
+
+				By("Check IPAMClaims are now deleted")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+		})
+
+		Context("and a virtual machine instance using it is also created", func() {
+			var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus
+			var err error
+			BeforeEach(func() {
+				By("Creating VMI using the nad")
+				Expect(testenv.Client.Create(context.Background(), vmi)).To(Succeed())
+
+				By(fmt.Sprintf("Waiting for readiness at virtual machine instance %s", vmi.Name))
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(testenv.ContainConditionVMIReady())
+
+				By("Wait for IPAMClaim to get created")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
+				Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty())
+			})
+
+			It("should keep ips after live migration", func() {
+				vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs
+
+				testenv.LiveMigrateVirtualMachine(td.Namespace, vmi.Name)
+				testenv.CheckLiveMigrationSucceeded(td.Namespace, vmi.Name)
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
+				Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
+			})
+
+			It("should garbage collect IPAMClaims after VMI deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vmi)).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VMI foreground deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vmi, foregroundDeleteOptions())).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+		})
+
+	})
+})
+
+func getPodByVirtualMachineInstance(vmi *kubevirtv1.VirtualMachineInstance) (*corev1.Pod, error) {
+	pod, err := lookupPodBySelector(vmi.Namespace, vmiLabelSelector(vmi), vmiFieldSelector(vmi))
+	if err != nil {
+		return nil, fmt.Errorf("failed to find pod for VMI %s (%s): %w", vmi.Name, string(vmi.GetUID()), err)
+	}
+	return pod, nil
+}
+
+func lookupPodBySelector(namespace string, labelSelector, fieldSelector map[string]string) (*corev1.Pod, error) {
+	pods := &corev1.PodList{}
+	err := testenv.Client.List(context.Background(), pods,
+		client.InNamespace(namespace),
+		client.MatchingLabels(labelSelector),
+		client.MatchingFields(fieldSelector))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(pods.Items) == 0 {
+		return nil, fmt.Errorf("failed to lookup pod")
+	}
+
+	return &pods.Items[0], nil
+}
+
+func vmiLabelSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
+	return map[string]string{kubevirtv1.CreatedByLabel: string(vmi.GetUID())}
+}
+
+func vmiFieldSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
+	fieldSelectors := map[string]string{}
+	if vmi.Status.Phase == kubevirtv1.Running {
+		const podPhase = "status.phase"
+		fieldSelectors[podPhase] = string(corev1.PodRunning)
+	}
+	if node := vmi.Status.NodeName; node != "" {
+		const nodeName = "spec.nodeName"
+		fieldSelectors[nodeName] = node
+	}
+	return fieldSelectors
+}
+
+func parsePodNetworkStatusAnnotation(podNetStatus string) ([]nadv1.NetworkStatus, error) {
+	if len(podNetStatus) == 0 {
+		return nil, fmt.Errorf("network status annotation not found")
+	}
+
+	var netStatus []nadv1.NetworkStatus
+	if err := json.Unmarshal([]byte(podNetStatus), &netStatus); err != nil {
+		return nil, err
+	}
+
+	return netStatus, nil
+}
+
+func getDefaultNetworkStatus(vmi *kubevirtv1.VirtualMachineInstance) (*nadv1.NetworkStatus, error) {
+	virtLauncherPod, err := getPodByVirtualMachineInstance(vmi)
+	if err != nil {
+		return nil, err
+	}
+
+	netStatuses, err := parsePodNetworkStatusAnnotation(virtLauncherPod.Annotations[nadv1.NetworkStatusAnnot])
+	if err != nil {
+		return nil, err
+	}
+
+	for _, netStatus := range netStatuses {
+		if netStatus.Default {
+			return &netStatus, nil
+		}
+	}
+	return nil, fmt.Errorf("primary IPs not found")
+}
diff --git a/test/env/generate.go b/test/env/generate.go
index 596e72e..9373472 100644
--- a/test/env/generate.go
+++ b/test/env/generate.go
@@ -97,6 +97,80 @@ func GenerateAlpineWithMultusVMI(namespace, interfaceName, networkName string) *
 	}
 }
 
+func GenerateAlpineWithPrimaryUDNVMI(namespace string) *kubevirtv1.VirtualMachineInstance {
+	const interfaceName = "passtnet"
+	return &kubevirtv1.VirtualMachineInstance{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      RandomName("alpine", 16),
+		},
+		Spec: kubevirtv1.VirtualMachineInstanceSpec{
+			Domain: kubevirtv1.DomainSpec{
+				Resources: kubevirtv1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceMemory: resource.MustParse("2048Mi"),
+					},
+				},
+				Devices: kubevirtv1.Devices{
+					Disks: []kubevirtv1.Disk{
+						{
+							DiskDevice: kubevirtv1.DiskDevice{
+								Disk: &kubevirtv1.DiskTarget{
+									Bus: kubevirtv1.DiskBusVirtio,
+								},
+							},
+							Name: "containerdisk",
+						},
+					},
+					Interfaces: []kubevirtv1.Interface{
+						{
+							Name: interfaceName,
+							Binding: &kubevirtv1.PluginBinding{
+								Name: "passt",
+							},
+						},
+					},
+				},
+			},
+			Networks: []kubevirtv1.Network{
+				{
+					Name: interfaceName,
+					NetworkSource: kubevirtv1.NetworkSource{
+						Pod: &kubevirtv1.PodNetwork{},
+					},
+				},
+			},
+			TerminationGracePeriodSeconds: pointer.Int64(5),
+			Volumes: []kubevirtv1.Volume{
+				{
+					Name: "containerdisk",
+					VolumeSource: kubevirtv1.VolumeSource{
+						ContainerDisk: &kubevirtv1.ContainerDiskSource{
+							Image: "quay.io/kubevirtci/alpine-container-disk-demo:devel_alt",
+						},
+					},
+				},
+				{
+					Name: "cloudinitdisk",
+					VolumeSource: kubevirtv1.VolumeSource{
+						CloudInitNoCloud: &kubevirtv1.CloudInitNoCloudSource{
+							NetworkData: cloudInitNetworkData(),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func cloudInitNetworkData() string {
+	return `
+version: 2
+ethernets:
+  eth0:
+    dhcp4: true`
+}
+
 type VMOption func(vm *kubevirtv1.VirtualMachine)
 
 func NewVirtualMachine(vmi *kubevirtv1.VirtualMachineInstance, opts ...VMOption) *kubevirtv1.VirtualMachine {