e2e, persistentip: Add tests for primary UDN
These tests are practically the same as the secondary-interface tests,
with a few semantic changes:
- networkInterfaceName is 'ovn-udn1'
- the VMI is created with a primary UDN
- the NAD is created with role: primary
- interface IPs are extracted from the network-status annotation rather
  than from vmi.status, as the latter is not yet supported for primary
  UDN (see the sketch below)
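
For reference, a minimal sketch of the pod annotation the tests read the
IPs from. Only the interface name below is taken from the tests; the
network name and address are illustrative values, not output from this
test suite:

// Illustrative shape of the k8s.v1.cni.cncf.io/network-status annotation
// on the virt-launcher pod; getDefaultNetworkStatus picks the entry
// marked "default": true. Values other than the interface name are
// examples only.
const sampleNetworkStatusAnnotation = `[
    {
        "name": "ovn-kubernetes",
        "interface": "ovn-udn1",
        "ips": ["10.128.0.5"],
        "default": true
    }
]`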

Signed-off-by: Ram Lavi <[email protected]>
RamLavi committed Sep 26, 2024
1 parent dc5e8be commit 6ce1cb2
Showing 2 changed files with 470 additions and 0 deletions.
396 changes: 396 additions & 0 deletions test/e2e/persistentips-primary_test.go
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2024 Red Hat, Inc.
*
*/

package e2e

import (
    "context"
    "encoding/json"
    "fmt"
    "os/exec"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"

    nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"

    kubevirtv1 "kubevirt.io/api/core/v1"

    testenv "github.com/kubevirt/ipam-extensions/test/env"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

var _ = Describe("Persistent IPs on Primary UDN interface", func() {
    var failureCount int = 0
    JustAfterEach(func() {
        if CurrentSpecReport().Failed() {
            failureCount++
            By(fmt.Sprintf("Test failed, collecting logs and artifacts, failure count %d", failureCount))

            logCommand([]string{"get", "pods", "-A"}, "pods", failureCount)
            logCommand([]string{"get", "vm", "-A", "-oyaml"}, "vms", failureCount)
            logCommand([]string{"get", "vmi", "-A", "-oyaml"}, "vmis", failureCount)
            logCommand([]string{"get", "ipamclaims", "-A", "-oyaml"}, "ipamclaims", failureCount)
            logOvnPods(failureCount)
        }
    })

    When("network attachment definition created with allowPersistentIPs=true", func() {
        var (
            td                   testenv.TestData
            networkInterfaceName = "ovn-udn1"
            vm                   *kubevirtv1.VirtualMachine
            vmi                  *kubevirtv1.VirtualMachineInstance
            nad                  *nadv1.NetworkAttachmentDefinition
        )
        BeforeEach(func() {
            td = testenv.GenerateTestData()
            td.SetUp()
            DeferCleanup(func() {
                td.TearDown()
            })

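            // The "primary" argument is assumed to set role: primary in the
            // generated NAD, making this UDN the primary network for the VM
            // (per the commit description; the helper lives in test/env).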
            nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace, "primary")
            vmi = testenv.GenerateAlpineWithPrimaryUDNVMI(td.Namespace)
            vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning())

            By("Create NetworkAttachmentDefinition")
            Expect(testenv.Client.Create(context.Background(), nad)).To(Succeed())
        })
        Context("and a virtual machine using it is also created", func() {
            var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus
            var err error
            BeforeEach(func() {
                By("Creating VM with primary UDN")
                Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed())

                By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name))
                Eventually(testenv.ThisVMReadiness(vm)).
                    WithPolling(time.Second).
                    WithTimeout(5 * time.Minute).
                    Should(BeTrue())

                By("Wait for IPAMClaim to get created")
                Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    ShouldNot(BeEmpty())

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
                Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty())
            })

            It("should keep ips after live migration", func() {
                vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs

                testenv.LiveMigrateVirtualMachine(td.Namespace, vm.Name)
                testenv.CheckLiveMigrationSucceeded(td.Namespace, vm.Name)

                By("Wait for VMI to be ready after live migration")
                Eventually(testenv.ThisVMI(vmi)).
                    WithPolling(time.Second).
                    WithTimeout(5 * time.Minute).
                    Should(testenv.ContainConditionVMIReady())

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
                Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
            })

            It("should garbage collect IPAMClaims after VM deletion", func() {
                Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
                Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeEmpty())
            })

            It("should garbage collect IPAMClaims after VM foreground deletion", func() {
                Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
                Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeEmpty())
            })

            When("the VM is stopped", func() {
                BeforeEach(func() {
                    By("Invoking virtctl stop")
                    output, err := exec.Command("virtctl", "stop", "-n", td.Namespace, vmi.Name).CombinedOutput()
                    Expect(err).NotTo(HaveOccurred(), string(output))

                    By("Ensuring VM is not running")
                    Eventually(testenv.ThisVMI(vmi), 360*time.Second, 1*time.Second).Should(
                        SatisfyAll(
                            Not(testenv.BeCreated()),
                            Not(testenv.BeReady()),
                        ))

                    Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                        WithTimeout(time.Minute).
                        WithPolling(time.Second).
                        ShouldNot(BeEmpty())
                })

                It("should garbage collect IPAMClaims after VM is deleted", func() {
                    By("Delete VM and check ipam claims are gone")
                    Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
                    Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                        WithTimeout(time.Minute).
                        WithPolling(time.Second).
                        Should(BeEmpty())
                })

                It("should garbage collect IPAMClaims after VM is foreground deleted", func() {
                    By("Foreground delete VM and check ipam claims are gone")
                    Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
                    Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                        WithTimeout(time.Minute).
                        WithPolling(time.Second).
                        Should(BeEmpty())
                })
            })

            It("should keep ips after restart", func() {
                defaultNetworkStatusBeforeRestart, err := getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                vmiIPsBeforeRestart := defaultNetworkStatusBeforeRestart.IPs
                vmiUUIDBeforeRestart := vmi.UID

                By("Re-starting the VM")
                output, err := exec.Command("virtctl", "restart", "-n", td.Namespace, vmi.Name).CombinedOutput()
                Expect(err).NotTo(HaveOccurred(), string(output))

                By("Wait for a new VMI to be re-started")
                Eventually(testenv.ThisVMI(vmi)).
                    WithPolling(time.Second).
                    WithTimeout(90 * time.Second).
                    Should(testenv.BeRestarted(vmiUUIDBeforeRestart))

                By("Wait for VMI to be ready after restart")
                Eventually(testenv.ThisVMI(vmi)).
                    WithPolling(time.Second).
                    WithTimeout(5 * time.Minute).
                    Should(testenv.ContainConditionVMIReady())

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                defaultNetworkStatusAfterRestart, err := getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(defaultNetworkStatusAfterRestart.Interface).To(Equal(defaultNetworkStatusBeforeRestart.Interface))
                Expect(defaultNetworkStatusAfterRestart.IPs).To(ConsistOf(vmiIPsBeforeRestart))
            })
        })

        When("requested for a VM whose VMI has extra finalizers", func() {
            const testFinalizer = "testFinalizer"

            BeforeEach(func() {
                By("Adding VMI custom finalizer to control VMI deletion")
                vm.Spec.Template.ObjectMeta.Finalizers = []string{testFinalizer}

                By("Creating VM with primary UDN")
                Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed())

                By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name))
                Eventually(testenv.ThisVMReadiness(vm)).
                    WithPolling(time.Second).
                    WithTimeout(5 * time.Minute).
                    Should(BeTrue())

                By("Wait for IPAMClaim to get created")
                Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    ShouldNot(BeEmpty())

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                vmiDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(vmiDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
                Expect(vmiDefaultNetworkStatus.IPs).ToNot(BeEmpty())
            })

            It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() {
                By("Foreground delete the VM, and validate the IPAMClaim isn't deleted while the VMI still exists")
                Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
                Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    ShouldNot(BeEmpty())

                By("Remove the finalizer (all the others were already removed at this stage)")
                patchData, err := removeFinalizersPatch()
                Expect(err).NotTo(HaveOccurred())
                Expect(testenv.Client.Patch(context.Background(), vmi, client.RawPatch(types.MergePatchType, patchData))).To(Succeed())

                By("Check IPAMClaims are now deleted")
                Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeEmpty())
            })
        })

        Context("and a virtual machine instance using it is also created", func() {
            var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus
            var err error
            BeforeEach(func() {
                By("Creating VMI using the nad")
                Expect(testenv.Client.Create(context.Background(), vmi)).To(Succeed())

                By(fmt.Sprintf("Waiting for readiness at virtual machine instance %s", vmi.Name))
                Eventually(testenv.ThisVMI(vmi)).
                    WithPolling(time.Second).
                    WithTimeout(5 * time.Minute).
                    Should(testenv.ContainConditionVMIReady())

                By("Wait for IPAMClaim to get created")
                Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    ShouldNot(BeEmpty())

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
                Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty())
            })

            It("should keep ips after live migration", func() {
                vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs

                testenv.LiveMigrateVirtualMachine(td.Namespace, vmi.Name)
                testenv.CheckLiveMigrationSucceeded(td.Namespace, vmi.Name)

                Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())

                targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
                Expect(err).ToNot(HaveOccurred())
                Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
                Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
            })

            It("should garbage collect IPAMClaims after VMI deletion", func() {
                Expect(testenv.Client.Delete(context.Background(), vmi)).To(Succeed())
                Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeEmpty())
            })

            It("should garbage collect IPAMClaims after VMI foreground deletion", func() {
                Expect(testenv.Client.Delete(context.Background(), vmi, foregroundDeleteOptions())).To(Succeed())
                Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
                    WithTimeout(time.Minute).
                    WithPolling(time.Second).
                    Should(BeEmpty())
            })
        })

    })
})

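// getPodByVirtualMachineInstance returns the virt-launcher pod backing the
// given VMI, matched by the kubevirt.io/created-by label and, when known,
// the pod phase and node name.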
func getPodByVirtualMachineInstance(vmi *kubevirtv1.VirtualMachineInstance) (*corev1.Pod, error) {
    pod, err := lookupPodBySelector(vmi.Namespace, vmiLabelSelector(vmi), vmiFieldSelector(vmi))
    if err != nil {
        return nil, fmt.Errorf("failed to find pod for VMI %s (%s): %w", vmi.Name, string(vmi.GetUID()), err)
    }
    return pod, nil
}

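// lookupPodBySelector lists the pods in the namespace matching the given
// label and field selectors and returns the first match.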
func lookupPodBySelector(namespace string, labelSelector, fieldSelector map[string]string) (*corev1.Pod, error) {
    pods := &corev1.PodList{}
    err := testenv.Client.List(context.Background(), pods,
        client.InNamespace(namespace),
        client.MatchingLabels(labelSelector),
        client.MatchingFields(fieldSelector))
    if err != nil {
        return nil, err
    }

    if len(pods.Items) == 0 {
        return nil, fmt.Errorf("no pods found in namespace %q matching the given selectors", namespace)
    }

    return &pods.Items[0], nil
}

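// vmiLabelSelector matches pods created on behalf of the given VMI.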
func vmiLabelSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
    return map[string]string{kubevirtv1.CreatedByLabel: string(vmi.GetUID())}
}

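// vmiFieldSelector narrows the pod lookup to running pods on the VMI's node
// when that information is available, so a migrated VMI resolves to its
// current virt-launcher pod rather than a stale one.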
func vmiFieldSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
    fieldSelectors := map[string]string{}
    if vmi.Status.Phase == kubevirtv1.Running {
        const podPhase = "status.phase"
        fieldSelectors[podPhase] = string(corev1.PodRunning)
    }
    if node := vmi.Status.NodeName; node != "" {
        const nodeName = "spec.nodeName"
        fieldSelectors[nodeName] = node
    }
    return fieldSelectors
}

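// parsePodNetworkStatusAnnotation unmarshals the JSON value of the pod's
// k8s.v1.cni.cncf.io/network-status annotation into NetworkStatus entries.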
func parsePodNetworkStatusAnnotation(podNetStatus string) ([]nadv1.NetworkStatus, error) {
    if len(podNetStatus) == 0 {
        return nil, fmt.Errorf("network status annotation not found")
    }

    var netStatus []nadv1.NetworkStatus
    if err := json.Unmarshal([]byte(podNetStatus), &netStatus); err != nil {
        return nil, err
    }

    return netStatus, nil
}

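// getDefaultNetworkStatus returns the network-status entry marked as the
// pod's default network. The IPs are read from the virt-launcher pod's
// network-status annotation rather than from vmi.status, since the latter
// does not yet report addresses for primary UDN interfaces.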
func getDefaultNetworkStatus(vmi *kubevirtv1.VirtualMachineInstance) (*nadv1.NetworkStatus, error) {
    virtLauncherPod, err := getPodByVirtualMachineInstance(vmi)
    if err != nil {
        return nil, err
    }

    netStatuses, err := parsePodNetworkStatusAnnotation(virtLauncherPod.Annotations[nadv1.NetworkStatusAnnot])
    if err != nil {
        return nil, err
    }

    for _, netStatus := range netStatuses {
        if netStatus.Default {
            return &netStatus, nil
        }
    }
    return nil, fmt.Errorf("default network status not found on pod %s", virtLauncherPod.Name)
}