From 1ba00d8f3877d69c97dc89e72c43c29063a5fcc5 Mon Sep 17 00:00:00 2001
From: Julia Graham
Date: Tue, 8 Oct 2024 17:16:03 -0400
Subject: [PATCH] Multi_vms_with_stress: Add a test for starting VMs with a
 stress workload on the host

This PR adds:

VIRT-301893 - [aarch64 only] Start VMs with maximum vcpus and stress on host

Signed-off-by: Julia Graham
---
 .../multivm_stress/multi_vms_with_stress.cfg |  5 ++
 .../multivm_stress/multi_vms_with_stress.py  | 85 +++++++++++++++++++
 2 files changed, 90 insertions(+)
 create mode 100644 libvirt/tests/cfg/multivm_stress/multi_vms_with_stress.cfg
 create mode 100644 libvirt/tests/src/multivm_stress/multi_vms_with_stress.py

diff --git a/libvirt/tests/cfg/multivm_stress/multi_vms_with_stress.cfg b/libvirt/tests/cfg/multivm_stress/multi_vms_with_stress.cfg
new file mode 100644
index 0000000000..128fca6b24
--- /dev/null
+++ b/libvirt/tests/cfg/multivm_stress/multi_vms_with_stress.cfg
@@ -0,0 +1,5 @@
+- multi_vms_with_stress:
+    type = multi_vms_with_stress
+    memory = 4194304
+    vm_names = vm2 vm3
+    stress_args = '--cpu 4 --io 4 --vm 2 --vm-bytes 128M &'
diff --git a/libvirt/tests/src/multivm_stress/multi_vms_with_stress.py b/libvirt/tests/src/multivm_stress/multi_vms_with_stress.py
new file mode 100644
index 0000000000..fec03d42e7
--- /dev/null
+++ b/libvirt/tests/src/multivm_stress/multi_vms_with_stress.py
@@ -0,0 +1,85 @@
+import logging as log
+
+from avocado.utils import cpu
+
+from virttest import utils_test
+from virttest.libvirt_xml import vm_xml
+
+LOG = log.getLogger('avocado.' + __name__)
+
+
+def run(test, params, env):
+    """
+    Test that multiple vms can start while a stress workload runs on the host.
+
+    Steps:
+    1. Prepare 3 vms, each with an even vcpu count of about 2/3 of the host online cpus
+    2. Start a stress workload on the host
+    3. Start all vms and verify each vm can be logged into normally
+    4. Verify all vms can be gracefully shut down
+    """
+    memory = params.get("memory", "4194304")
+    main_vm_name = params.get("main_vm")
+    main_vm = env.get_vm(main_vm_name)
+    vm_names = params.get("vm_names").split()
+    vms = [main_vm]
+    vmxml_backups = []
+
+    # Clone the main vm to get the additional vms
+    for vm_name in vm_names:
+        vms.append(main_vm.clone(vm_name))
+
+    for vm in vms:
+        # Back up the domain XML
+        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
+        vmxml_backups.append(vmxml.copy())
+        # Increase the memory
+        vmxml.memory = int(memory)
+        vmxml.current_mem = int(memory)
+        vmxml.sync()
+
+    try:
+        # Get the host online cpu number
+        host_online_cpus = cpu.online_count()
+        LOG.debug("Host online CPU number: %s", host_online_cpus)
+
+        # Give each vm an even vcpu count of about 2/3 of the host
+        # online cpus, so the vms together oversubscribe the host
+        for vm in vms:
+            if vm.is_alive():
+                vm.destroy()
+            vcpus_num = host_online_cpus * 2 // len(vms)
+            if vcpus_num % 2 != 0:
+                vcpus_num += 1
+            vm_xml.VMXML.set_vm_vcpus(vm.name, vcpus_num, vcpus_num,
+                                      topology_correction=True)
+            LOG.debug("Defined vm %s with '%s' vcpu(s)", vm.name, vcpus_num)
+
+        # Start the stress workload on the host
+        # (params must include stress_args)
+        utils_test.load_stress("stress_on_host", params=params)
+
+        # Start all vms and verify each vm can be logged into normally
+        for vm in vms:
+            vm.prepare_guest_agent()
+            vm.wait_for_login()
+            if vm.state() != "running":
+                test.fail("VM %s should be running, not %s" % (vm.name, vm.state()))
+
+        # Verify all vms can be gracefully shut down; shutdown is
+        # asynchronous, so wait for the state change rather than checking at once
+        for vm in vms:
+            vm.shutdown()
+            if not vm.wait_for_shutdown():
+                test.fail("VM %s should be shut off, not %s" % (vm.name, vm.state()))
+
+    finally:
+        # Stop the stress workload
+        utils_test.unload_stress("stress_on_host", params=params)
+
+        # Recover the VMs from the backed-up XMLs
+        for i, vm in enumerate(vms):
+            if vm.is_alive():
+                vm.destroy(gracefully=False)
+            LOG.info("Restoring vm %s...", vm.name)
+            vmxml_backups[i].sync()
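
A note for reviewers on the vcpu sizing: with 3 vms, host_online_cpus * 2 // len(vms)
gives each vm roughly 2/3 of the host online cpus, and odd results are rounded up so
every vm keeps an even vcpu count. Below is a standalone sketch of that rule, not part
of the patch; the host CPU counts are made-up examples:

    # Reproduce the test's sizing rule: about 2/3 of the host online cpus
    # per vm, rounded up to an even number (num_vms is 3 in the test).
    def sized_vcpus(host_online_cpus, num_vms=3):
        vcpus_num = host_online_cpus * 2 // num_vms
        if vcpus_num % 2 != 0:
            vcpus_num += 1
        return vcpus_num

    # e.g. 8 host cpus -> 5 -> rounded to 6 vcpus per vm, so the 3 vms
    # together run 18 vcpus on 8 host cpus and oversubscribe the host
    for cpus in (8, 16, 64):
        print(cpus, sized_vcpus(cpus))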