diff --git a/.github/workflows/test-sequences.yml b/.github/workflows/test-sequences.yml new file mode 100644 index 000000000..3761c6af6 --- /dev/null +++ b/.github/workflows/test-sequences.yml @@ -0,0 +1,26 @@ +name: Check test-sequences consistency + +on: [push] + +jobs: + jobs-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v4 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/base.txt + - name: Create a dummy data.py + run: cp data.py-dist data.py + - name: jobs-check + run: | + FAILURES="" + for seq in $(find -name "*.lst"); do + if ! pytest @$seq --collect-only --quiet; then + FAILURES="$FAILURES $seq" + fi + done + [ -z "$FAILURES" ] || { echo >&2 "ERROR: test sequences failed consistency check: $FAILURES"; exit 1; } diff --git a/conftest.py b/conftest.py index 3c176a49c..2326774c1 100644 --- a/conftest.py +++ b/conftest.py @@ -1,5 +1,7 @@ import itertools +import git import logging +import os import pytest import tempfile @@ -7,11 +9,15 @@ import lib.config as global_config +from lib import pxe +from lib.common import callable_marker, shortened_nodeid from lib.common import wait_for, vm_image, is_uuid +from lib.common import prefix_object_name from lib.common import setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk from lib.netutil import is_ipv6 from lib.pool import Pool -from lib.vm import VM +from lib.sr import SR +from lib.vm import VM, vm_cache_key_from_def from lib.xo import xo_cli # Import package-scoped fixtures. 
Although we need to define them in a separate file so that we can @@ -29,6 +35,12 @@ # pytest hooks def pytest_addoption(parser): + parser.addoption( + "--nest", + action="store", + default=None, + help="XCP-ng or XS master of pool to use for nesting hosts under test", + ) parser.addoption( "--hosts", action="append", @@ -136,22 +148,71 @@ def pytest_runtest_makereport(item, call): # fixtures -def setup_host(hostname_or_ip): - pool = Pool(hostname_or_ip) - h = pool.master - return h - @pytest.fixture(scope='session') def hosts(pytestconfig): + nested_list = [] + + def setup_host(hostname_or_ip, *, config=None): + host_vm = None + if hostname_or_ip.startswith("cache://"): + nest_hostname = config.getoption("nest") + if not nest_hostname: + pytest.fail("--hosts=cache://... requires --nest parameter") + nest = Pool(nest_hostname).master + + protocol, rest = hostname_or_ip.split(":", 1) + host_vm = nest.import_vm(f"clone:{rest}", nest.main_sr_uuid(), + use_cache=True) + nested_list.append(host_vm) + + vif = host_vm.vifs()[0] + mac_address = vif.param_get('MAC') + logging.info("Nested host has MAC %s", mac_address) + + host_vm.start() + wait_for(host_vm.is_running, "Wait for nested host VM running") + + # catch host-vm IP address + wait_for(lambda: pxe.arp_addresses_for(mac_address), + "Wait for DHCP server to see nested host in ARP tables", + timeout_secs=10 * 60) + ips = pxe.arp_addresses_for(mac_address) + logging.info("Nested host has IPs %s", ips) + assert len(ips) == 1 + host_vm.ip = ips[0] + + wait_for(lambda: not os.system(f"nc -zw5 {host_vm.ip} 22"), + "Wait for ssh up on nested host", retry_delay_secs=5) + + hostname_or_ip = host_vm.ip + + pool = Pool(hostname_or_ip) + h = pool.master + return h + + def cleanup_hosts(): + for vm in nested_list: + logging.info("Destroying nested host VM %s", vm.uuid) + vm.destroy(verify=True) + # a list of master hosts, each from a different pool hosts_args = pytestconfig.getoption("hosts") hosts_split = [hostlist.split(',') 
for hostlist in hosts_args] hostname_list = list(itertools.chain(*hosts_split)) - host_list = [setup_host(hostname_or_ip) for hostname_or_ip in hostname_list] + + try: + host_list = [setup_host(hostname_or_ip, config=pytestconfig) + for hostname_or_ip in hostname_list] + except Exception: + cleanup_hosts() + raise + if not host_list: pytest.fail("This test requires at least one --hosts parameter") yield host_list + cleanup_hosts() + @pytest.fixture(scope='session') def registered_xo_cli(): # The fixture is not responsible for establishing the connection. @@ -375,6 +436,172 @@ def imported_vm(host, vm_ref): logging.info("<< Destroy VM") vm.destroy(verify=True) +@pytest.fixture(scope="session") +def tests_git_revision(): + """ + Get the git revision string for this tests repo. + + Use of this fixture means impacted tests cannot run unless all + modifications are committed. + """ + test_repo = git.Repo(".") + assert not test_repo.is_dirty(), "test repo must not be dirty" + yield test_repo.head.commit.hexsha + +@pytest.fixture(scope="function") +def create_vms(request, host, tests_git_revision): + """ + Returns list of VM objects created from `vm_definitions` marker. + + The `vm_definitions` marker allows the test author to specify one or more VMs, by + giving for each VM one `dict`, or a callable taking fixtures as + arguments and returning such a `dict`. 
+ + Mandatory keys: + - `name`: name of the VM to create (str) + - `template`: name (or UUID) of template to use (str) + + Optional keys: see example below + + Example: + ------- + > @pytest.mark.vm_definitions( + > dict(name="vm1", template="Other install media"), + > dict(name="vm2", + > template="CentOS 7", + > params=( + > dict(param_name="memory-static-max", value="4GiB"), + > dict(param_name="HVM-boot-params", key="order", value="dcn"), + > ), + > vdis=[dict(name="vm 2 system disk", + > size="100GiB", + > device="xvda", + > userdevice="0", + > )], + > cd_vbd=dict(device="xvdd", userdevice="3"), + > vifs=(dict(index=0, network_name=NETWORKS["MGMT"]), + > dict(index=1, network_uuid=NETWORKS["MYNET_UUID"]), + > ), + > )) + > def test_foo(create_vms): + > ... + + Example: + ------- + > @pytest.mark.dependency(depends=["test_foo"]) + > @pytest.mark.vm_definitions(dict(name="vm1", image_test="test_foo", image_vm="vm2")) + > def test_bar(create_vms): + > ... + + """ + marker = request.node.get_closest_marker("vm_definitions") + if marker is None: + raise Exception("No vm_definitions marker specified.") + + vm_defs = [] + for vm_def in marker.args: + vm_def = callable_marker(vm_def, request) + assert "name" in vm_def + assert "template" in vm_def or "image_test" in vm_def + if "template" in vm_def: + assert "image_test" not in vm_def + # FIXME should check optional vdis contents + # FIXME should check for extra args + vm_defs.append(vm_def) + + try: + vms = [] + vdis = [] + vbds = [] + for vm_def in vm_defs: + if "template" in vm_def: + _create_vm(request, vm_def, host, vms, vdis, vbds) + elif "image_test" in vm_def: + _vm_from_cache(request, vm_def, host, vms, tests_git_revision) + yield vms + + # request.node is an "item" because this fixture has "function" scope + report = request.node.stash.get(PHASE_REPORT_KEY, None) + if report is None: + # user interruption during setup + logging.warning("test setup result not available: not exporting VMs") + elif 
report["setup"].failed: + logging.warning("setting up a test failed or skipped: not exporting VMs") + elif ("call" not in report) or report["call"].failed: + logging.warning("executing test failed or skipped: not exporting VMs") + else: + # record this state + for vm_def, vm in zip(vm_defs, vms): + nodeid = shortened_nodeid(request.node.nodeid) + vm.save_to_cache(f"{nodeid}-{vm_def['name']}-{tests_git_revision}") + + except Exception: + logging.error("exception caught...") + raise + + finally: + for vbd in vbds: + logging.info("<< Destroy VBD %s", vbd.uuid) + vbd.destroy() + for vdi in vdis: + logging.info("<< Destroy VDI %s", vdi.uuid) + vdi.destroy() + for vm in vms: + logging.info("<< Destroy VM %s", vm.uuid) + vm.destroy(verify=True) + +def _vm_name(request, vm_def): + return prefix_object_name(f"{vm_def['name']} in {request.node.nodeid}") + +def _create_vm(request, vm_def, host, vms, vdis, vbds): + vm_name = _vm_name(request, vm_def) + vm_template = vm_def["template"] + + logging.info("Installing VM %r from template %r", vm_name, vm_template) + + vm = host.vm_from_template(vm_name, vm_template) + + # VM is now created, make sure we clean it up on any subsequent failure + vms.append(vm) + + if "vdis" in vm_def: + for vdi_def in vm_def["vdis"]: + sr = SR(host.main_sr_uuid(), host.pool) + vdi = sr.create_vdi(vdi_def["name"], vdi_def["size"]) + vdis.append(vdi) + # connect to VM + vbd = vm.create_vbd(vdi_def["device"], vdi.uuid) + vbds.append(vbd) + vbd.param_set(param_name="userdevice", value=vdi_def["userdevice"]) + + if "cd_vbd" in vm_def: + vm.create_cd_vbd(**vm_def["cd_vbd"]) + + if "vifs" in vm_def: + for vif_def in vm_def["vifs"]: + vm.create_vif(vif_def["index"], + network_uuid=vif_def.get("network_uuid", None), + network_name=vif_def.get("network_name", None)) + + if "params" in vm_def: + for param_def in vm_def["params"]: + logging.info("Setting param %s", param_def) + vm.param_set(**param_def) + +def _vm_from_cache(request, vm_def, host, vms, 
tests_hexsha): + base_vm = host.cached_vm(vm_cache_key_from_def(vm_def, request.node.nodeid, tests_hexsha), + sr_uuid=host.main_sr_uuid()) + if base_vm is None: + raise RuntimeError("No cache found") + + # Clone the VM before running tests, so that the original VM remains untouched + logging.info("Cloning VM from cache") + vm = base_vm.clone(name=_vm_name(request, vm_def)) + # Remove the description, which may contain a cache identifier + vm.param_set('name-description', "") + + vms.append(vm) + @pytest.fixture(scope="module") def started_vm(imported_vm): vm = imported_vm diff --git a/data.py-dist b/data.py-dist index a1bc8d3ad..4eda37b95 100644 --- a/data.py-dist +++ b/data.py-dist @@ -6,6 +6,20 @@ HOST_DEFAULT_USER = "root" HOST_DEFAULT_PASSWORD = "" +import crypt +def hash_password(password): + "Hash password for /etc/password" + salt = crypt.mksalt(crypt.METHOD_SHA512) + return crypt.crypt(password, salt) + +HOST_DEFAULT_PASSWORD_HASH = hash_password(HOST_DEFAULT_PASSWORD) + +# Public key for a private key available to the test runner +TEST_SSH_PUBKEY = """ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMnN/wVdQqHA8KsndfrLS7fktH/IEgxoa533efuXR6rw XCP-ng CI +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKz9uQOoxq6Q0SQ0XTzQHhDolvuo/7EyrDZsYQbRELhcPJG8MT/o5u3HyJFhIP2+HqBSXXgmqRPJUkwz9wUwb2sUwf44qZm/pyPUWOoxyVtrDXzokU/uiaNKUMhbnfaXMz6Ogovtjua63qld2+ZRXnIgrVtYKtYBeu/qKGVSnf4FTOUKl1w3uKkr59IUwwAO8ay3wVnxXIHI/iJgq6JBgQNHbn3C/SpYU++nqL9G7dMyqGD36QPFuqH/cayL8TjNZ67TgAzsPX8OvmRSqjrv3KFbeSlpS/R4enHkSemhgfc8Z2f49tE7qxWZ6x4Uyp5E6ur37FsRf/tEtKIUJGMRXN XCP-ng CI +""" + # The following prefix will be added to the `name-label` parameter of XAPI objects # that the tests will create or import, such as VMs and SRs. 
# Default value: [your login/user] @@ -20,18 +34,66 @@ HOSTS = { # "testhost1": {"user": "root", "password": "", 'skip_xo_config': True}, } +NETWORKS = { + "MGMT": "Pool-wide network associated with eth0", +} + # PXE config server for automated XCP-ng installation PXE_CONFIG_SERVER = 'pxe' # Default VM images location DEF_VM_URL = 'http://pxe/images/' +# Tools +TOOLS = { +# "iso-remaster": "/home/user/src/xcpng/xcp/scripts/iso-remaster/iso-remaster.sh", +} + # Values can be either full URLs or only partial URLs that will be automatically appended to DEF_VM_URL VM_IMAGES = { 'mini-linux-x86_64-bios': 'alpine-minimal-3.12.0.xva', 'mini-linux-x86_64-uefi': 'alpine-uefi-minimal-3.12.0.xva' } +ISO_IMAGES_BASE = "https://updates.xcp-ng.org/isos/" +ISO_IMAGES_CACHE = "/home/user/iso" +# ISO_IMAGES path can be: +# - absolute filename +# - absolute URL +# - path relative to ISO_IMAGES_BASE URL +# Note the dirname part is ignored when looking in ISO_IMAGES_CACHE, abuse this +# for local-only ISO with things like "locally-built/my.iso" or "xs/8.3.iso". +# If 'net-only' is set to 'True' only source of type URL will be possible. +# By default the parameter is set to False. 
+ISO_IMAGES = { +# '83nightly': {'path': "http://pxe/isos/xcp-ng-8.3-ci-latest"}, +# # FIXME: no such symlink + useless without 'net-url' +# #'83nightlynet': {'path': "http://pxe/isos/xcp-ng-8.3-ci-netinstall-latest"}, +# # 'net-url': 'fake', +# # 'net-only': True}, +# '83rc1': {'path': "8.3/xcp-ng-8.3.0-rc1.iso", +# 'net-url': "http://server/installers/xcp-ng/8.3-rc1"}, +# # FIXME: only a compensation for the lack of 83nightlynet +# '83rcnet': {'path': "8.3/xcp-ng-8.3.0-rc1-netinstall.iso", +# 'net-url': "http://server/installers/xcp-ng/8.3-rc1", +# 'net-only': True}, +# '83b2': {'path': "8.3/xcp-ng-8.3.0-beta2.iso", +# 'net-url': "http://server/installers/xcp-ng/8.3-beta2"}, +# '83b1': {'path': "8.3/xcp-ng-8.3.0-beta1.iso", +# 'net-url': "http://server/installers/xcp-ng/8.3-beta1"}, +# '821.1': {'path': "8.2/xcp-ng-8.2.1-20231130.iso", +# 'net-url': f"http://{PXE_CONFIG_SERVER}/installers/xcp-ng/8.2.1-refreshed/"}, +# '821': {'path': "8.2/xcp-ng-8.2.1.iso"}, +# '820': {'path': "8.2/xcp-ng-8.2.0.iso"}, +# '81': {'path': "8.1/xcp-ng-8.1.0-2.iso"}, +# '80': {'path': "8.0/xcp-ng-8.0.0.iso"}, +# '76': {'path': "7.6/xcp-ng-7.6.0.iso"}, +# '75': {'path': "7.5/xcp-ng-7.5.0-2.iso"}, +# 'xs8': {'path': "XenServer8_2024-03-18.iso"}, +# 'ch821.1': {'path': "CitrixHypervisor-8.2.1-2306-install-cd.iso"}, +# 'ch821': {'path': "CitrixHypervisor-8.2.1-install-cd.iso"}, +} + # In some cases, we may prefer to favour a local SR to store test VM disks, # to avoid latency or unstabilities related to network or shared file servers. # However it's not good practice to make a local SR the default SR for a pool of several hosts. 
@@ -98,6 +160,32 @@ LVMOISCSI_DEVICE_CONFIG = { # 'SCSIid': 'id' } +BASE_ANSWERFILES = dict( + INSTALL={ + "TAG": "installation", + "CONTENTS": ( + {"TAG": "root-password", + "type": "hash", + "CONTENTS": HOST_DEFAULT_PASSWORD_HASH}, + {"TAG": "timezone", + "CONTENTS": "Europe/Paris"}, + {"TAG": "keymap", + "CONTENTS": "us"}, + ), + }, + UPGRADE={ + "TAG": "installation", + "mode": "upgrade", + }, + RESTORE={ + "TAG": "restore", + }, +) + +IMAGE_EQUIVS = { +# 'install.test::Nested::install[bios-83rc1-ext]-vm1-607cea0c825a4d578fa5fab56978627d8b2e28bb': 'install.test::Nested::install[bios-83rc1-ext]-vm1-addb4ead4da49856e1d2fb3ddf4e31027c6b693b', +} + # compatibility settings for older tests DEFAULT_NFS_DEVICE_CONFIG = NFS_DEVICE_CONFIG DEFAULT_NFS4_DEVICE_CONFIG = NFS4_DEVICE_CONFIG diff --git a/jobs.py b/jobs.py index c4bd391e2..47a29ee9c 100755 --- a/jobs.py +++ b/jobs.py @@ -29,6 +29,7 @@ "tests/system", "tests/xapi", "tests/xapi-plugins", + "tests/install/test_fixtures.py", ], "markers": "(small_vm or no_vm) and not flaky and not reboot and not complex_prerequisites", }, diff --git a/lib/basevm.py b/lib/basevm.py index f82f2097a..ac98b7613 100644 --- a/lib/basevm.py +++ b/lib/basevm.py @@ -83,7 +83,7 @@ def get_sr(self): assert sr.attached_to_host(self.host) return sr - def export(self, filepath, compress='none'): + def export(self, filepath, compress='none', use_cache=False): logging.info("Export VM %s to %s with compress=%s" % (self.uuid, filepath, compress)) params = { 'uuid': self.uuid, diff --git a/lib/common.py b/lib/common.py index d840304f2..3eacab94d 100644 --- a/lib/common.py +++ b/lib/common.py @@ -1,12 +1,17 @@ import getpass import inspect +import itertools import logging +import os import sys import time import traceback from enum import Enum from uuid import UUID +import pytest +import requests + import lib.commands as commands class PackageManagerEnum(Enum): @@ -30,6 +35,51 @@ def prefix_object_name(label): name_prefix = f"[{getpass.getuser()}]" 
return f"{name_prefix} {label}" +def shortened_nodeid(nodeid): + components = nodeid.split("::") + # module + components[0] = strip_prefix(components[0], "tests/") + components[0] = strip_suffix(components[0], ".py") + components[0] = components[0].replace("/", ".") + # function + components[-1] = strip_prefix(components[-1], "test_") + # class + if len(components) > 2: + components[1] = strip_prefix(components[1], "Test") + + return "::".join(components) + +def expand_scope_relative_nodeid(scoped_nodeid, scope, ref_nodeid): + if scope == 'session' or scope == 'package': + base = () + elif scope == 'module': + base = ref_nodeid.split("::", 1)[:1] + elif scope == 'class': + base = ref_nodeid.split("::", 2)[:2] + else: + raise RuntimeError(f"Internal error: invalid scope {scope!r}") + logging.debug("scope: %r base: %r relative: %r", scope, base, scoped_nodeid) + return "::".join(itertools.chain(base, (scoped_nodeid,))) + +def callable_marker(value, request): + """ + Process value optionally generated by fixture-dependent callable. + + Typically useful for fixtures using pytest markers on parametrized tests. + + Such markers take as parameter one value, or a callable that will + return a value. The callable may take as parameters any subset of + the fixture names the test itself uses. 
+ """ + if callable(value): + try: + params = {arg_name: request.getfixturevalue(arg_name) + for arg_name in inspect.getfullargspec(value).args} + except pytest.FixtureLookupError as e: + raise RuntimeError("fixture in mapping not found on test") from e + value = value(**params) + return value + def wait_for(fn, msg=None, timeout_secs=2 * 60, retry_delay_secs=2, invert=False): if msg is not None: logging.info(msg) @@ -154,6 +204,15 @@ def strtobool(str): return False raise ValueError("invalid truth value '{}'".format(str)) +def url_download(url: str, filename: str) -> None: + r = requests.get(url, stream=True) + r.raise_for_status() + tempfilename = filename + ".part" + with open(tempfilename, 'wb') as fd: + for chunk in r.iter_content(chunk_size=128): + fd.write(chunk) + os.rename(tempfilename, filename) + def _param_get(host, xe_prefix, uuid, param_name, key=None, accept_unknown_key=False): """ Common implementation for param_get. """ args = {'uuid': uuid, 'param-name': param_name} diff --git a/lib/host.py b/lib/host.py index dad43cb2d..23be3187c 100644 --- a/lib/host.py +++ b/lib/host.py @@ -236,13 +236,28 @@ def cached_vm(self, uri, sr_uuid): if not vm.vdi_uuids() or vm.get_sr().uuid == sr_uuid: logging.info(f"Reusing cached VM {vm.uuid} for {uri}") return vm - logging.info("Could not find a VM in cache with key %r", cache_key) + logging.info("Could not find a VM in cache for %r", uri) def import_vm(self, uri, sr_uuid=None, use_cache=False): + vm = None if use_cache: - vm = self.cached_vm(uri, sr_uuid) + if '://' in uri and uri.startswith("clone"): + protocol, rest = uri.split(":", 1) + assert rest.startswith("//") + filename = rest[2:] # strip "//" + base_vm = self.cached_vm(filename, sr_uuid) + if base_vm: + vm = base_vm.clone() + vm.param_clear('name-description') + if uri.startswith("clone+start"): + vm.start() + wait_for(vm.is_running, "Wait for VM running") + else: + vm = self.cached_vm(uri, sr_uuid) if vm: return vm + else: + assert not ('://' in uri and 
uri.startswith("clone")), "clone URIs require cache enabled" params = {} msg = "Import VM %s" % uri @@ -267,6 +282,15 @@ def import_vm(self, uri, sr_uuid=None, use_cache=False): vm.param_set('name-description', cache_key) return vm + def vm_from_template(self, name, template): + params = { + "new-name-label": prefix_object_name(name), + "template": template, + "sr-uuid": self.main_sr_uuid(), + } + vm_uuid = self.xe('vm-install', params) + return VM(vm_uuid, self) + def pool_has_vm(self, vm_uuid, vm_type='vm'): if vm_type == 'snapshot': return self.xe('snapshot-list', {'uuid': vm_uuid}, minimal=True) == vm_uuid diff --git a/lib/installer.py b/lib/installer.py new file mode 100644 index 000000000..b2e00e3bf --- /dev/null +++ b/lib/installer.py @@ -0,0 +1,164 @@ +import logging +import time +import xml.etree.ElementTree as ET + +from lib.commands import ssh, SSHCommandFailed +from lib.common import wait_for + +class AnswerFile: + def __init__(self, kind, /): + from data import BASE_ANSWERFILES + defn = BASE_ANSWERFILES[kind] + self.defn = self._normalize_structure(defn) + + def write_xml(self, filename): + etree = ET.ElementTree(self._defn_to_xml_et(self.defn)) + etree.write(filename) + + # chainable mutators for lambdas + + def top_append(self, *defs): + for defn in defs: + self.defn['CONTENTS'].append(self._normalize_structure(defn)) + return self + + def top_setattr(self, attrs): + assert 'CONTENTS' not in attrs + self.defn.update(attrs) + return self + + # makes a mutable deep copy of all `contents` + @staticmethod + def _normalize_structure(defn): + assert isinstance(defn, dict) + assert 'TAG' in defn + defn = dict(defn) + if 'CONTENTS' not in defn: + defn['CONTENTS'] = [] + if not isinstance(defn['CONTENTS'], str): + defn['CONTENTS'] = [AnswerFile._normalize_structure(item) + for item in defn['CONTENTS']] + return defn + + # convert to a ElementTree.Element tree suitable for further + # modification before we serialize it to XML + @staticmethod + def 
_defn_to_xml_et(defn, /, *, parent=None): + assert isinstance(defn, dict) + defn = dict(defn) + name = defn.pop('TAG') + assert isinstance(name, str) + contents = defn.pop('CONTENTS', ()) + assert isinstance(contents, (str, list)) + element = ET.Element(name, **defn) + if parent is not None: + parent.append(element) + if isinstance(contents, str): + element.text = contents + else: + for contents in contents: + AnswerFile._defn_to_xml_et(contents, parent=element) + return element + +def poweroff(ip): + try: + ssh(ip, ["poweroff"]) + except SSHCommandFailed as e: + # ignore connection closed by reboot + if e.returncode == 255 and "closed by remote host" in e.stdout: + logging.info("sshd closed the connection") + pass + else: + raise + +def monitor_install(*, ip): + # wait for "yum install" phase to finish + wait_for(lambda: ssh(ip, ["grep", + "'DISPATCH: NEW PHASE: Completing installation'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for rpm installation to succeed", + timeout_secs=40 * 60) # FIXME too big + + # wait for install to finish + wait_for(lambda: ssh(ip, ["grep", + "'The installation completed successfully'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for system installation to succeed", + timeout_secs=40 * 60) # FIXME too big + + wait_for(lambda: ssh(ip, ["ps a|grep '[0-9]. 
python /opt/xensource/installer/init'"], + check=False, simple_output=False, + ).returncode == 1, + "Wait for installer to terminate") + +def monitor_upgrade(*, ip): + # wait for "yum install" phase to start + wait_for(lambda: ssh(ip, ["grep", + "'DISPATCH: NEW PHASE: Reading package information'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for upgrade preparations to finish", + timeout_secs=40 * 60) # FIXME too big + + # wait for "yum install" phase to finish + wait_for(lambda: ssh(ip, ["grep", + "'DISPATCH: NEW PHASE: Completing installation'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for rpm installation to succeed", + timeout_secs=40 * 60) # FIXME too big + + # wait for install to finish + wait_for(lambda: ssh(ip, ["grep", + "'The installation completed successfully'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for system installation to succeed", + timeout_secs=40 * 60) # FIXME too big + + wait_for(lambda: ssh(ip, ["ps a|grep '[0-9]. python /opt/xensource/installer/init'"], + check=False, simple_output=False, + ).returncode == 1, + "Wait for installer to terminate") + +def monitor_restore(*, ip): + # wait for "yum install" phase to start + wait_for(lambda: ssh(ip, ["grep", + "'Restoring backup'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for data restoration to start", + timeout_secs=40 * 60) # FIXME too big + + # wait for "yum install" phase to finish + wait_for(lambda: ssh(ip, ["grep", + "'Data restoration complete. 
About to re-install bootloader.'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for data restoration to complete", + timeout_secs=40 * 60) # FIXME too big + + # The installer will not terminate in restore mode, it + # requires human interaction and does not even log it, so + # wait for last known action log (tested with 8.3b2) + wait_for(lambda: ssh(ip, ["grep", + "'ran .*swaplabel.*rc 0'", + "/tmp/install-log"], + check=False, simple_output=False, + ).returncode == 0, + "Wait for installer to hopefully finish", + timeout_secs=40 * 60) # FIXME too big + + # "wait a bit to be extra sure". Yuck. + time.sleep(30) + + logging.info("Shutting down Host VM after successful restore") diff --git a/lib/pool.py b/lib/pool.py index 046406890..a68b3d9cb 100644 --- a/lib/pool.py +++ b/lib/pool.py @@ -1,4 +1,5 @@ import logging +import os import traceback from packaging import version @@ -25,6 +26,7 @@ def __init__(self, master_hostname_or_ip): f"Wait for XAPI init to be complete on {master_hostname_or_ip}", timeout_secs=30 * 60) + logging.info("Getting Pool info for %r", master_hostname_or_ip) for host_uuid in self.hosts_uuids(): if host_uuid != self.hosts[0].uuid: host = Host(self, self.host_ip(host_uuid)) @@ -117,6 +119,31 @@ def first_shared_sr(self): def get_vdi_sr_uuid(self, vdi_uuid): return self.master.xe('vdi-param-get', {'uuid': vdi_uuid, 'param-name': 'sr-uuid'}) + def get_iso_sr(self): + uuids = safe_split(self.master.xe('sr-list', {'type': 'iso', 'is-tools-sr': False}, + minimal=True)) + assert len(uuids) == 1 # we may need to allow finer selection if this triggers + return SR(uuids[0], self) + + def push_iso(self, local_file, remote_filename=None): + iso_sr = self.get_iso_sr() + mountpoint = f"/run/sr-mount/{iso_sr.uuid}" + if remote_filename is None: + # needs only work on XCP-ng 8.2+ + remote_filename = self.master.ssh(["mktemp --suffix=.iso -p", mountpoint]) + self.master.ssh(["chmod 644", remote_filename]) + + 
logging.info("Uploading to ISO-SR %s as %s", local_file, remote_filename) + self.master.scp(local_file, remote_filename) + iso_sr.scan() + return os.path.basename(remote_filename) + + def remove_iso(self, remote_filename): + iso_sr = self.get_iso_sr() + fullpath = f"/run/sr-mount/{iso_sr.uuid}/{remote_filename}" + logging.info("Removing %s from ISO-SR server", remote_filename) + self.master.ssh(["rm", fullpath]) + def save_uefi_certs(self): """ Save UEFI certificates in order to restore them later. XCP-ng 8.2 only. @@ -254,3 +281,6 @@ def eject_host(self, host): wait_for_not(lambda: host.uuid in self.hosts_uuids(), f"Wait for host {host} to be ejected of pool {master}.") self.hosts = [h for h in self.hosts if h.uuid != host.uuid] wait_for(host.is_enabled, f"Wait for host {host} to restart in its own pool.", timeout_secs=10 * 60) + + def network_named(self, network_name): + return self.master.xe('network-list', {'name-label': network_name}, minimal=True) diff --git a/lib/vdi.py b/lib/vdi.py index 3b3317310..0b38eb137 100644 --- a/lib/vdi.py +++ b/lib/vdi.py @@ -9,7 +9,7 @@ def __init__(self, uuid, *, host=None, sr=None): self.uuid = uuid # TODO: use a different approach when migration is possible if sr is None: - sr_uuid = host.get_vdi_sr_uuid(uuid) + sr_uuid = host.pool.get_vdi_sr_uuid(uuid) # avoid circular import # FIXME should get it from Host instead from lib.sr import SR diff --git a/lib/vm.py b/lib/vm.py index 7137a424e..0b74ac6f2 100644 --- a/lib/vm.py +++ b/lib/vm.py @@ -8,7 +8,9 @@ from lib.basevm import BaseVM from lib.common import PackageManagerEnum, parse_xe_dict, safe_split, wait_for, wait_for_not +from lib.common import shortened_nodeid, expand_scope_relative_nodeid from lib.snapshot import Snapshot +from lib.vbd import VBD from lib.vif import VIF class VM(BaseVM): @@ -247,6 +249,18 @@ def vifs(self): _vifs.append(VIF(vif_uuid, self)) return _vifs + def create_vif(self, vif_num, *, network_uuid=None, network_name=None): + assert bool(network_uuid) != 
bool(network_name), \ + "create_vif needs network_uuid XOR network_name" + if network_name: + network_uuid = self.host.pool.network_named(network_name) + assert network_uuid, f"No UUID given, and network name {network_name!r} not found" + logging.info("Create VIF %d to network %r on VM %s", vif_num, network_uuid, self.uuid) + self.host.xe('vif-create', {'vm-uuid': self.uuid, + 'device': str(vif_num), + 'network-uuid': network_uuid, + }) + def is_running_on_host(self, host): return self.is_running() and self.param_get('resident-on') == host.uuid @@ -473,6 +487,27 @@ def destroy_vtpm(self): logging.info("Destroying vTPM %s" % vtpm_uuid) return self.host.xe('vtpm-destroy', {'uuid': vtpm_uuid}, force=True) + def create_vbd(self, device, vdi_uuid): + logging.info("Create VBD %r for VDI %r on VM %s", device, vdi_uuid, self.uuid) + vbd_uuid = self.host.xe('vbd-create', {'vm-uuid': self.uuid, + 'device': device, + 'vdi-uuid': vdi_uuid, + }) + logging.info("New VBD %s", vbd_uuid) + return VBD(vbd_uuid, self, device) + + def create_cd_vbd(self, device, userdevice): + logging.info("Create CD VBD %r on VM %s", device, self.uuid) + vbd_uuid = self.host.xe('vbd-create', {'vm-uuid': self.uuid, + 'device': device, + 'type': 'CD', + 'mode': 'RO', + }) + vbd = VBD(vbd_uuid, self, device) + vbd.param_set(param_name="userdevice", value=userdevice) + logging.info("New VBD %s", vbd_uuid) + return vbd + def clone(self, *, name=None): if name is None: name = self.name() + '_clone_for_tests' @@ -587,3 +622,29 @@ def is_cert_present(vm, key): res = vm.host.ssh(['varstore-get', vm.uuid, efi.get_secure_boot_guid(key).as_str(), key], check=False, simple_output=False, decode=False) return res.returncode == 0 + + def save_to_cache(self, cache_id): + logging.info("Save VM %s to cache for %r as a clone" % (self.uuid, cache_id)) + + while True: + old_vm = self.host.cached_vm(cache_id, sr_uuid=self.host.main_sr_uuid()) + if old_vm is None: + break + logging.info("Destroying old cache %s first", 
old_vm.uuid) + old_vm.destroy() + + clone = self.clone(name=f"{self.name()} cache") + logging.info(f"Marking VM {clone.uuid} as cached") + clone.param_set('name-description', self.host.vm_cache_key(cache_id)) + + +def vm_cache_key_from_def(vm_def, ref_nodeid, test_gitref): + vm_name = vm_def["name"] + image_test = vm_def["image_test"] + image_vm = vm_def.get("image_vm", vm_name) + image_scope = vm_def.get("image_scope", "module") + nodeid = shortened_nodeid(expand_scope_relative_nodeid(image_test, image_scope, ref_nodeid)) + image_key = f"{nodeid}-{image_vm}-{test_gitref}" + + from data import IMAGE_EQUIVS + return IMAGE_EQUIVS.get(image_key, image_key) diff --git a/pytest.ini b/pytest.ini index fe93b353b..6886cce3e 100644 --- a/pytest.ini +++ b/pytest.ini @@ -18,6 +18,14 @@ markers = unix_vm: tests that require a unix/linux VM to run. windows_vm: tests that require a Windows VM to run. + # * VM-related markers to give parameters to fixtures + vm_definitions: dicts of VM nick to VM defs for create_vms fixture. + continuation_of: dicts of VM nick to test (and soon VM nick) from which to start + + # * installation-related markers to customize installer run + answerfile: dict defining an answerfile + package_source: source of packages during installation. + # * Test targets related to VMs small_vm: tests that it is enough to run just once, using the smallest possible VM. big_vm: tests that it would be good to run with a big VM. 
diff --git a/requirements/base.txt b/requirements/base.txt index 3df8b072e..72272ea18 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,4 +1,7 @@ cryptography>=3.3.1 +GitPython packaging>=20.7 pytest>=8.0.0 pluggy>=1.1.0 +requests +pytest-dependency diff --git a/tests/install/__init__.py b/tests/install/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/install/conftest.py b/tests/install/conftest.py new file mode 100644 index 000000000..7982bd83e --- /dev/null +++ b/tests/install/conftest.py @@ -0,0 +1,342 @@ +import logging +import os +import pytest +import pytest_dependency +import tempfile +import xml.etree.ElementTree as ET + +from lib import installer, pxe +from lib.common import callable_marker, url_download, wait_for +from lib.installer import AnswerFile +from lib.commands import local_cmd + +from data import (ISO_IMAGES, ISO_IMAGES_BASE, ISO_IMAGES_CACHE, + PXE_CONFIG_SERVER, TEST_SSH_PUBKEY, TOOLS) + +# Return true if the version of the ISO doesn't support the source type. +# Note: this is a quick-win hack, to avoid explicit enumeration of supported +# package_source values for each ISO. 
+def skip_package_source(version, package_source): + if version not in ISO_IMAGES: + return True, "version of ISO {} is unknown".format(version) + + if package_source == "iso": + if ISO_IMAGES[version].get('net-only', False): + return True, "ISO image is net-only while package_source is local" + + return False, "do not skip" + + if package_source == "net": + # Net install is not valid if there is no netinstall URL + # FIXME: ISO includes a default URL so we should be able to omit net-url + if 'net-url' not in ISO_IMAGES[version].keys(): + return True, "net-url required for netinstall was not found for {}".format(version) + + return False, "do not skip" + + # If we don't know the source type then it is invalid + return True, "unknown source type {}".format(package_source) + +@pytest.fixture(scope='function') +def answerfile(request): + """ + Makes an AnswerFile object available to the test and other fixtures. + + AnswerFile objects are typically generated from a template + customizable in `data.py` specified to the ctor, and extended by: + - adding attributes to the top element + - appending new elements to the top element's children + + > @pytest.mark.answerfile(lambda firmware: AnswerFile("INSTALL") + > .top_setattr({"sr-type": local_sr}) + > .top_append( + > {"TAG": "source", "type": "local"}, + > {"TAG": "primary-disk", + > "guest-storage": "yes", + > "CONTENTS": {"uefi": "nvme0n1", "bios": "sda"}[firmware]}, + > )) + > def test_install(answerfile): + > answerfile.write_xml("my-answers.xml") + """ + marker = request.node.get_closest_marker("answerfile") + + if marker is None: + yield None # no answerfile to generate + return + + # construct answerfile definition from option "base", and explicit bits + answerfile_def = callable_marker(marker.args[0], request) + assert isinstance(answerfile_def, AnswerFile) + + answerfile_def.top_append( + dict(TAG="admin-interface", + name="eth0", + proto="dhcp", + ), + ) + + yield answerfile_def + + +@pytest.fixture(scope='function')
+def installer_iso(request): + iso_key = request.getfixturevalue("iso_version") + package_source = request.getfixturevalue("package_source") + skip, reason = skip_package_source(iso_key, package_source) + if skip: + pytest.skip(reason) + assert iso_key in ISO_IMAGES, f"ISO_IMAGES does not have a value for {iso_key}" + iso = ISO_IMAGES[iso_key]['path'] + if iso.startswith("/"): + assert os.path.exists(iso), f"file not found: {iso}" + local_iso = iso + else: + cached_iso = os.path.join(ISO_IMAGES_CACHE, os.path.basename(iso)) + if not os.path.exists(cached_iso): + url = iso if ":/" in iso else (ISO_IMAGES_BASE + iso) + logging.info("installer_iso: downloading %r into %r", url, cached_iso) + url_download(url, cached_iso) + local_iso = cached_iso + logging.info("installer_iso: using %r", local_iso) + return dict(iso=local_iso, + unsigned=ISO_IMAGES[iso_key].get('unsigned', False), + ) + +@pytest.fixture(scope='function') +def install_disk(request): + firmware = request.getfixturevalue("firmware") + yield {"uefi": "nvme0n1", "bios": "sda"}[firmware] + +# Remasters the ISO specified by `installer_iso` mark, with: +# - network and ssh support activated, and .ssh/authorized_key so tests can +# go probe installation process +# - a test-pingpxe.service running in installer system, to make it possible +# for the test to determine the dynamic IP obtained during installation +# - atexit=shell to prevent the system from spontaneously rebooting +# - a generated answerfile to make the install process non-interactive +# - a postinstall script to modify the installed system with: +# - the same .ssh/authorized_key +# - the same test-pingpxe.service, which is also useful even with static IP, +# in contexts where the same IP is reused by successively different MACs +# (when cloning VMs from cache) +@pytest.fixture(scope='function') +def remastered_iso(installer_iso, answerfile): + iso_file = installer_iso['iso'] + unsigned = installer_iso['unsigned'] + + assert "iso-remaster" in TOOLS
+ iso_remaster = TOOLS["iso-remaster"] + assert os.access(iso_remaster, os.X_OK) + + with tempfile.TemporaryDirectory() as isotmp: + remastered_iso = os.path.join(isotmp, "image.iso") + img_patcher_script = os.path.join(isotmp, "img-patcher") + iso_patcher_script = os.path.join(isotmp, "iso-patcher") + answerfile_xml = os.path.join(isotmp, "answerfile.xml") + + if answerfile: + logging.info("generating answerfile %s", answerfile_xml) + answerfile.top_append(dict(TAG="script", stage="filesystem-populated", + type="url", CONTENTS="file:///root/postinstall.sh")) + if unsigned: + answerfile.top_setattr({'gpgcheck': "false", + 'repo-gpgcheck': "false", + }) + answerfile.write_xml(answerfile_xml) + else: + logging.info("no answerfile") + + logging.info("Remastering %s to %s", iso_file, remastered_iso) + + # generate install.img-patcher script + with open(img_patcher_script, "xt") as patcher_fd: + print(f"""#!/bin/bash +set -ex +INSTALLIMG="$1" + +mkdir -p "$INSTALLIMG/root/.ssh" +echo "{TEST_SSH_PUBKEY}" > "$INSTALLIMG/root/.ssh/authorized_keys" + +test ! -e "{answerfile_xml}" || + cp "{answerfile_xml}" "$INSTALLIMG/root/answerfile.xml" + +mkdir -p "$INSTALLIMG/usr/local/sbin" +cat > "$INSTALLIMG/usr/local/sbin/test-pingpxe.sh" << 'EOF' +#! /bin/bash +set -eE +set -o pipefail + +ether_of () {{ + ifconfig "$1" | grep ether | sed 's/.*ether \\([^ ]*\\).*/\\1/' +}} + +# on installed system, avoid xapi-project/xen-api#5799 +if ! 
[ -e /opt/xensource/installer ]; then + eth_mac=$(ether_of eth0) + br_mac=$(ether_of xenbr0) + + # wait for bridge MAC to be fixed + test "$eth_mac" = "$br_mac" +fi + +if [ $(readlink "/bin/ping") = busybox ]; then + # XS before 7.0 + PINGARGS="" +else + PINGARGS="-c1" +fi + +ping $PINGARGS "$1" +EOF +chmod +x "$INSTALLIMG/usr/local/sbin/test-pingpxe.sh" + +if [ -d "$INSTALLIMG/etc/systemd/system" ]; then + cat > "$INSTALLIMG/etc/systemd/system/test-pingpxe.service" < "$INSTALLIMG/etc/init.d/S12test-pingpxe" <<'EOF' +#!/bin/sh +case "$1" in + start) + sh -c 'while ! /usr/local/sbin/test-pingpxe.sh "{PXE_CONFIG_SERVER}"; do sleep 1 ; done' & ;; + stop) ;; +esac +EOF + + chmod +x "$INSTALLIMG/etc/init.d/S12test-pingpxe" +fi + +cat > "$INSTALLIMG/root/postinstall.sh" <<'EOF' +#!/bin/sh +set -ex + +ROOT="$1" + +mkdir -p "$ROOT/usr/local/sbin" +cp /usr/local/sbin/test-pingpxe.sh "$ROOT/usr/local/sbin/test-pingpxe.sh" +if [ -d "$ROOT/etc/systemd/system" ]; then + cp /etc/systemd/system/test-pingpxe.service "$ROOT/etc/systemd/system/test-pingpxe.service" + systemctl --root="$ROOT" enable test-pingpxe.service +else + cp /etc/init.d/S12test-pingpxe "$ROOT/etc/init.d/test-pingpxe" + ln -s ../init.d/test-pingpxe "$ROOT/etc/rc3.d/S11test-pingpxe" +fi + +mkdir -p "$ROOT/root/.ssh" +echo "{TEST_SSH_PUBKEY}" >> "$ROOT/root/.ssh/authorized_keys" +EOF +""", + file=patcher_fd) + os.chmod(patcher_fd.fileno(), 0o755) + + # generate iso-patcher script + with open(iso_patcher_script, "xt") as patcher_fd: + passwd = "passw0rd" # FIXME use invalid hash? + print(f"""#!/bin/bash +set -ex +ISODIR="$1" +SED_COMMANDS=(-e "s@/vmlinuz@/vmlinuz network_device=all sshpassword={passwd} atexit=shell@") +test ! 
-e "{answerfile_xml}" || + SED_COMMANDS+=(-e "s@/vmlinuz@/vmlinuz install answerfile=file:///root/answerfile.xml@") +# assuming *gpgcheck only appear within unsigned ISO +test "{unsigned}" = False || + SED_COMMANDS+=(-e "s@ no-gpgcheck\\>@@" -e "s@ no-repo-gpgcheck\\>@@") + + +shopt -s nullglob # there may be no grub config, eg for XS 6.5 and earlier +sed -i "${{SED_COMMANDS[@]}}" \ + "$ISODIR"/*/*/grub*.cfg \ + "$ISODIR"/boot/isolinux/isolinux.cfg +""", + file=patcher_fd) + os.chmod(patcher_fd.fileno(), 0o755) + + # do remaster + local_cmd([iso_remaster, + "--install-patcher", img_patcher_script, + "--iso-patcher", iso_patcher_script, + iso_file, remastered_iso + ]) + + yield remastered_iso + +@pytest.fixture(scope='function') +def vm_booted_with_installer(host, create_vms, remastered_iso): + host_vm, = create_vms # one single VM + iso = remastered_iso + + vif = host_vm.vifs()[0] + mac_address = vif.param_get('MAC') + logging.info("Host VM has MAC %s", mac_address) + + remote_iso = None + try: + remote_iso = host.pool.push_iso(iso) + host_vm.insert_cd(os.path.basename(remote_iso)) + + try: + host_vm.start() + wait_for(host_vm.is_running, "Wait for host VM running") + + # catch host-vm IP address + wait_for(lambda: pxe.arp_addresses_for(mac_address), + "Wait for DHCP server to see Host VM in ARP tables", + timeout_secs=10 * 60) + ips = pxe.arp_addresses_for(mac_address) + logging.info("Host VM has IPs %s", ips) + assert len(ips) == 1 + host_vm.ip = ips[0] + + # host may not be up if ARP cache was filled + wait_for(lambda: local_cmd(["ping", "-c1", host_vm.ip], check=False), + "Wait for host up", timeout_secs=10 * 60, retry_delay_secs=10) + wait_for(lambda: local_cmd(["nc", "-zw5", host_vm.ip, "22"], check=False), + "Wait for ssh up on host", timeout_secs=10 * 60, retry_delay_secs=5) + + yield host_vm + + logging.info("Shutting down Host VM") + installer.poweroff(host_vm.ip) + wait_for(host_vm.is_halted, "Wait for host VM halted") + + except Exception as e: + 
logging.critical("caught exception %s", e) + host_vm.shutdown(force=True) + raise + except KeyboardInterrupt: + logging.warning("keyboard interrupt") + host_vm.shutdown(force=True) + raise + + host_vm.eject_cd() + finally: + if remote_iso: + host.pool.remove_iso(remote_iso) + +@pytest.fixture(scope='function') +def xcpng_chained(request): + # take test name from mark + marker = request.node.get_closest_marker("continuation_of") + assert marker is not None, "xcpng_chained fixture requires 'continuation_of' marker" + continuation_of = callable_marker(marker.args[0], request) + + vm_defs = [dict(name=vm_spec['vm'], + image_test=vm_spec['image_test'], + image_vm=vm_spec.get("image_vm", vm_spec['vm']), + image_scope=vm_spec.get("scope", "module"), + ) + for vm_spec in continuation_of] + + depends = [vm_spec['image_test'] for vm_spec in continuation_of] + pytest_dependency.depends(request, depends) + request.applymarker(pytest.mark.vm_definitions(*vm_defs)) diff --git a/tests/install/test-sequences/inst+upg+rst.lst b/tests/install/test-sequences/inst+upg+rst.lst new file mode 100644 index 000000000..c23878e4c --- /dev/null +++ b/tests/install/test-sequences/inst+upg+rst.lst @@ -0,0 +1,2 @@ +tests/install/test.py::TestNested::test_restore[uefi-83nightly-83nightly-83nightly-iso-ext] +tests/install/test.py::TestNested::test_boot_rst[uefi-83nightly-83nightly-83nightly-iso-ext] diff --git a/tests/install/test-sequences/inst+upg.lst b/tests/install/test-sequences/inst+upg.lst new file mode 100644 index 000000000..100e53593 --- /dev/null +++ b/tests/install/test-sequences/inst+upg.lst @@ -0,0 +1,2 @@ +tests/install/test.py::TestNested::test_upgrade[uefi-83nightly-83nightly-host1-iso-ext] +tests/install/test.py::TestNested::test_boot_upg[uefi-83nightly-83nightly-host1-iso-ext] diff --git a/tests/install/test-sequences/inst.lst b/tests/install/test-sequences/inst.lst new file mode 100644 index 000000000..9b92eea31 --- /dev/null +++ b/tests/install/test-sequences/inst.lst @@ -0,0 
+1,3 @@ +tests/install/test.py::TestNested::test_install[uefi-83nightly-iso-ext] +tests/install/test.py::TestNested::test_tune_firstboot[None-uefi-83nightly-host1-iso-ext] +tests/install/test.py::TestNested::test_boot_inst[uefi-83nightly-host1-iso-ext] diff --git a/tests/install/test.py b/tests/install/test.py new file mode 100644 index 000000000..e694e607d --- /dev/null +++ b/tests/install/test.py @@ -0,0 +1,422 @@ +import logging +import pytest +from uuid import uuid4 + +from lib import commands, installer, pxe +from lib.common import safe_split, wait_for +from lib.installer import AnswerFile +from lib.pif import PIF +from lib.pool import Pool +from lib.vdi import VDI + +from data import ISO_IMAGES, NETWORKS +assert "MGMT" in NETWORKS + +# Requirements: +# - one XCP-ng host capable of nested virt, with an ISO SR, and a default SR +# - the "small_vm" ISO must have in authorized_keys a SSH key accepted by the +# ssh server in the installed host version (7.x and earlier reject current +# ssh-rsa keys, a public ssh-ed25519 key listed in TEST_SSH_PUBKEY should be +# there) + +@pytest.fixture +def helper_vm_with_plugged_disk(running_vm, create_vms): + helper_vm = running_vm + host_vm, = create_vms + + all_vdis = [VDI(uuid, host=host_vm.host) for uuid in host_vm.vdi_uuids()] + disk_vdis = [vdi for vdi in all_vdis if not vdi.readonly()] + vdi, = disk_vdis + + vbd = helper_vm.create_vbd("1", vdi.uuid) + try: + vbd.plug() + + yield helper_vm + + finally: + vbd.unplug() + vbd.destroy() + +@pytest.mark.dependency() +class TestNested: + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize("iso_version", ( + "83nightly", "83rcnet", + "83rc1", "83b2", "83b1", + "821.1", + "81", "80", "76", "75", + "xs8", "ch821.1", + "xs70", + )) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.vm_definitions( + lambda firmware: dict( + name="vm1", + template="Other install 
media", + params=( + # dict(param_name="", value=""), + dict(param_name="memory-static-max", value="4GiB"), + dict(param_name="memory-dynamic-max", value="4GiB"), + dict(param_name="memory-dynamic-min", value="4GiB"), + dict(param_name="VCPUs-max", value="2"), + dict(param_name="VCPUs-at-startup", value="2"), + dict(param_name="platform", key="exp-nested-hvm", value="true"), # FIXME < 8.3 host? + dict(param_name="HVM-boot-params", key="order", value="dc"), + ) + { + "uefi": ( + dict(param_name="HVM-boot-params", key="firmware", value="uefi"), + dict(param_name="platform", key="device-model", value="qemu-upstream-uefi"), + ), + "bios": (), + }[firmware], + vdis=[dict(name="vm1 system disk", size="100GiB", device="xvda", userdevice="0")], + cd_vbd=dict(device="xvdd", userdevice="3"), + vifs=[dict(index=0, network_name=NETWORKS["MGMT"])], + )) + @pytest.mark.answerfile( + lambda install_disk, local_sr, package_source, iso_version: AnswerFile("INSTALL") + .top_setattr({} if local_sr == "nosr" else {"sr-type": local_sr}) + .top_append( + {"TAG": "source", "type": "local"} if package_source == "iso" + else {"TAG": "source", "type": "url", + "CONTENTS": ISO_IMAGES[iso_version]['net-url']} if package_source == "net" + else {}, + {"TAG": "primary-disk", + "guest-storage": "no" if local_sr == "nosr" else "yes", + "CONTENTS": install_disk}, + )) + def test_install(self, vm_booted_with_installer, install_disk, + firmware, iso_version, package_source, local_sr): + host_vm = vm_booted_with_installer + installer.monitor_install(ip=host_vm.ip) + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize("machine", ("host1", "host2")) + @pytest.mark.parametrize("version", ( + "83nightly", "83rcnet", + "83rc1", "83b2", "83b1", + "821.1", + "81", "80", + "76", "75", + "ch821.1", "xs8", + "xs70", + )) + @pytest.mark.parametrize("firmware", ("uefi", 
"bios")) + @pytest.mark.continuation_of( + lambda version, firmware, local_sr, package_source: [dict( + vm="vm1", + image_test=f"TestNested::test_install[{firmware}-{version}-{package_source}-{local_sr}]")]) + @pytest.mark.small_vm + def test_tune_firstboot(self, create_vms, helper_vm_with_plugged_disk, + firmware, version, machine, local_sr, package_source): + helper_vm = helper_vm_with_plugged_disk + + helper_vm.ssh(["mount /dev/xvdb1 /mnt"]) + try: + # hostname + logging.info("Setting hostname to %r", machine) + helper_vm.ssh(["echo > /mnt/etc/hostname", machine]) + # UUIDs + logging.info("Randomizing UUIDs") + helper_vm.ssh( + ['sed -i', + f'''-e "/^INSTALLATION_UUID=/ s/.*/INSTALLATION_UUID='{uuid4()}'/"''', + f'''-e "/^CONTROL_DOMAIN_UUID=/ s/.*/CONTROL_DOMAIN_UUID='{uuid4()}'/"''', + '/mnt/etc/xensource-inventory']) + helper_vm.ssh(["grep UUID /mnt/etc/xensource-inventory"]) + finally: + helper_vm.ssh(["umount /dev/xvdb1"]) + + def _test_firstboot(self, create_vms, mode, *, machine='DEFAULT', is_restore=False): + host_vm = create_vms[0] + vif = host_vm.vifs()[0] + mac_address = vif.param_get('MAC') + logging.info("Host VM has MAC %s", mac_address) + + # succession of insta/upg/rst operations + split_mode = mode.split("-") + if is_restore: + # restore: back to previous installed version + expected_rel_id = split_mode[-3] + else: + expected_rel_id = split_mode[-1] + expected_rel = { + "83nightly": "8.3.0", + "83rcnet": "8.3.0", + "83rc1": "8.3.0", + "83b2": "8.3.0", + "83b1": "8.3.0", + "821.1": "8.2.1", + "81": "8.1.0", + "80": "8.0.0", + "76": "7.6.0", + "75": "7.5.0", + # + "xs8": "8.4.0", + "ch821.1": "8.2.1", + "xs70": "7.0.0-125380c", + }[expected_rel_id] + + # determine version info from `mode` + if expected_rel_id.startswith("xs"): + expected_dist = "XenServer" + elif expected_rel_id.startswith("ch"): + expected_dist = "CitrixHypervisor" + else: + expected_dist = "XCP-ng" + + try: + host_vm.start() + wait_for(host_vm.is_running, "Wait for host VM 
running") + + # catch host-vm IP address + wait_for(lambda: pxe.arp_addresses_for(mac_address), + "Wait for DHCP server to see Host VM in ARP tables", + timeout_secs=10 * 60) + ips = pxe.arp_addresses_for(mac_address) + logging.info("Host VM has IPs %s", ips) + assert len(ips) == 1 + host_vm.ip = ips[0] + + wait_for( + lambda: commands.local_cmd( + ["nc", "-zw5", host_vm.ip, "22"], check=False).returncode == 0, + "Wait for ssh back up on Host VM", retry_delay_secs=5, timeout_secs=4 * 60) + + logging.info("Checking installed version (expecting %r %r)", + expected_dist, expected_rel) + lsb_dist = commands.ssh(host_vm.ip, ["lsb_release", "-si"]) + lsb_rel = commands.ssh(host_vm.ip, ["lsb_release", "-sr"]) + assert (lsb_dist, lsb_rel) == (expected_dist, expected_rel) + + # pool master must be reachable here + pool = Pool(host_vm.ip) + + # wait for XAPI + wait_for(pool.master.is_enabled, "Wait for XAPI to be ready", timeout_secs=30 * 60) + + if lsb_rel in ["8.2.1", "8.3.0", "8.4.0"]: + SERVICES = ["control-domain-params-init", + "network-init", + "storage-init", + "generate-iscsi-iqn", + "create-guest-templates", + ] + STAMPS_DIR = "/var/lib/misc" + STAMPS = [f"ran-{service}" for service in SERVICES] + elif lsb_rel in ["7.0.0-125380c", "7.5.0", "7.6.0", "8.0.0", "8.1.0"]: + SERVICES = ["xs-firstboot"] + STAMPS_DIR = "/etc/firstboot.d/state" + STAMPS = [ + "10-prepare-storage", + "15-set-default-storage", + "20-udev-storage", + "25-multipath", + "40-generate-iscsi-iqn", + "50-prepare-control-domain-params", + "60-upgrade-likewise-to-pbis", + "90-flush-pool-db", + "95-legacy-logrotate", + "99-remove-firstboot-flag", + ] + if lsb_rel in ["7.0.0-125380c"]: + STAMPS += [ + "61-regenerate-old-templates", + ] + if lsb_rel in ["7.5.0", "7.6.0", "8.0.0", "8.1.0"]: + STAMPS += [ + "05-prepare-networking", + "60-import-keys", + "62-create-guest-templates", + ] + if lsb_rel in ["8.0.0", "8.1.0"]: + STAMPS += [ + "80-common-criteria", + ] + else: + raise AssertionError(f"Unhandled 
LSB release {lsb_rel!r}") + # check for firstboot issues + # FIXME: flaky, must check logs extraction on failure + try: + for stamp in sorted(STAMPS): + wait_for(lambda: pool.master.ssh(["test", "-e", f"{STAMPS_DIR}/{stamp}"], + check=False, simple_output=False, + ).returncode == 0, + f"Wait for {stamp} stamp") + except TimeoutError: + logging.warning("investigating lack of %s service stamp", stamp) + for service in SERVICES: + out = pool.master.ssh(["systemctl", "status", service], check=False) + logging.warning("service status: %s", out) + out = pool.master.ssh(["grep", "-r", service, "/var/log"], check=False) + logging.warning("in logs: %s", out) + raise + + logging.info("Powering off pool master") + try: + # use "poweroff" because "reboot" would cause ARP and + # SSH to be checked before host is down, and require + # ssh retries + pool.master.ssh(["poweroff"]) + except commands.SSHCommandFailed as e: + # ignore connection closed by reboot + if e.returncode == 255 and "closed by remote host" in e.stdout: + logging.info("sshd closed the connection") + pass + else: + raise + + wait_for(host_vm.is_halted, "Wait for host VM halted") + + except Exception as e: + logging.critical("caught exception %s", e) + # wait_for(lambda: False, 'Wait "forever"', timeout_secs=100 * 60) + host_vm.shutdown(force=True) + raise + except KeyboardInterrupt: + logging.warning("keyboard interrupt") + # wait_for(lambda: False, 'Wait "forever"', timeout_secs=100 * 60) + host_vm.shutdown(force=True) + raise + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize("machine", ("host1", "host2")) + @pytest.mark.parametrize("version", ( + "83nightly", "83rcnet", + "83rc1", "83b2", "83b1", + "821.1", + "81", "80", + "76", "75", + "ch821.1", "xs8", + "xs70", + )) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.continuation_of( + lambda 
firmware, version, machine, local_sr, package_source: [ + dict(vm="vm1", + image_test=("TestNested::test_tune_firstboot" + f"[None-{firmware}-{version}-{machine}-{package_source}-{local_sr}]"))]) + def test_boot_inst(self, create_vms, + firmware, version, machine, package_source, local_sr): + self._test_firstboot(create_vms, version, machine=machine) + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize("machine", ("host1", "host2")) + @pytest.mark.parametrize(("orig_version", "iso_version"), [ + ("83nightly", "83nightly"), + ("83rc1", "83nightly"), + ("83b2", "83nightly"), + ("83b1", "83nightly"), + ("821.1", "83nightly"), + ("81", "83nightly"), + ("80", "83nightly"), + ("xs8", "83nightly"), + ("ch821.1", "83nightly"), + ("83rcnet", "83rcnet"), # FIXME + ("821.1", "821.1"), + ]) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.continuation_of( + lambda firmware, orig_version, machine, package_source, local_sr: [dict( + vm="vm1", + image_test=f"TestNested::test_boot_inst[{firmware}-{orig_version}-{machine}-{package_source}-{local_sr}]")]) + @pytest.mark.answerfile( + lambda install_disk: AnswerFile("UPGRADE").top_append( + {"TAG": "source", "type": "local"}, + {"TAG": "existing-installation", + "CONTENTS": install_disk}, + )) + def test_upgrade(self, vm_booted_with_installer, install_disk, + firmware, orig_version, iso_version, machine, package_source, local_sr): + host_vm = vm_booted_with_installer + installer.monitor_upgrade(ip=host_vm.ip) + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize("machine", ("host1", "host2")) + @pytest.mark.parametrize("mode", ( + "83nightly-83nightly", + "83rc1-83nightly", + "83b2-83nightly", + "83b1-83nightly", + 
"821.1-83nightly", + "81-83nightly", + "80-83nightly", + "ch821.1-83nightly", + "xs8-83nightly", + "83rcnet-83rcnet", + "821.1-821.1", + )) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.continuation_of( + lambda firmware, mode, machine, package_source, local_sr: [dict( + vm="vm1", + image_test=(f"TestNested::test_upgrade[{firmware}-{mode}-{machine}-{package_source}-{local_sr}]"))]) + def test_boot_upg(self, create_vms, + firmware, mode, machine, package_source, local_sr): + self._test_firstboot(create_vms, mode, machine=machine) + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + @pytest.mark.parametrize(("orig_version", "iso_version"), [ + ("83nightly-83nightly", "83nightly"), + ("83rc1-83nightly", "83nightly"), + ("83b2-83nightly", "83nightly"), + ("83b1-83nightly", "83nightly"), + ("821.1-83nightly", "83nightly"), + ("81-83nightly", "83nightly"), + ("80-83nightly", "83nightly"), + ("ch821.1-83nightly", "83nightly"), + ("xs8-83nightly", "83nightly"), + ("83rcnet-83rcnet", "83rcnet"), # FIXME + ("821.1-821.1", "821.1"), + ]) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.continuation_of( + lambda firmware, orig_version, local_sr, package_source: [dict( + vm="vm1", + image_test=f"TestNested::test_boot_upg[{firmware}-{orig_version}-host1-{package_source}-{local_sr}]")]) + @pytest.mark.answerfile( + lambda install_disk: AnswerFile("RESTORE").top_append( + {"TAG": "backup-disk", + "CONTENTS": install_disk}, + )) + def test_restore(self, vm_booted_with_installer, install_disk, + firmware, orig_version, iso_version, package_source, local_sr): + host_vm = vm_booted_with_installer + installer.monitor_restore(ip=host_vm.ip) + + @pytest.mark.usefixtures("xcpng_chained") + @pytest.mark.parametrize("local_sr", ("nosr", "ext", "lvm")) + @pytest.mark.parametrize("package_source", ("iso", "net")) + 
@pytest.mark.parametrize("mode", ( + "83nightly-83nightly-83nightly", + "83rc1-83nightly-83nightly", + "83b2-83nightly-83nightly", + "83b1-83nightly-83nightly", + "821.1-83nightly-83nightly", + "81-83nightly-83nightly", + "80-83nightly-83nightly", + "ch821.1-83nightly-83nightly", + "xs8-83nightly-83nightly", + "83rcnet-83rcnet", "83rcnet-83rcnet-83rcnet", # FIXME + "821.1-821.1-821.1", + )) + @pytest.mark.parametrize("firmware", ("uefi", "bios")) + @pytest.mark.continuation_of( + lambda firmware, mode, package_source, local_sr: [dict( + vm="vm1", + image_test=(f"TestNested::test_restore[{firmware}-{mode}-{package_source}-{local_sr}]"))]) + def test_boot_rst(self, create_vms, + firmware, mode, package_source, local_sr): + self._test_firstboot(create_vms, mode, is_restore=True) diff --git a/tests/install/test_fixtures.py b/tests/install/test_fixtures.py new file mode 100644 index 000000000..64d21c3c4 --- /dev/null +++ b/tests/install/test_fixtures.py @@ -0,0 +1,31 @@ +import logging +import pytest + +from lib.installer import AnswerFile + +# test the answerfile fixture can run on 2 parametrized instances +# of the test in one run +@pytest.mark.answerfile(lambda: AnswerFile("INSTALL").top_append( + {"TAG": "source", "type": "local"}, + {"TAG": "primary-disk", "text": "nvme0n1"}, +)) +@pytest.mark.parametrize("parm", [ + 1, + pytest.param(2, marks=[ + pytest.mark.dependency(depends=["TestFixtures::test_parametrized_answerfile[1]"]), + ]), +]) +@pytest.mark.dependency +def test_parametrized_answerfile(answerfile, parm): + logging.debug("test_parametrized_answerfile with parm=%s", parm) + +@pytest.mark.dependency +class TestManualChaining: + @pytest.mark.vm_definitions(dict(name="vm1", template="Other install media")) + def test_create(self, create_vms): + logging.debug("TestManualChaining::test_create: %s", create_vms) + + @pytest.mark.dependency(depends=["TestManualChaining::test_create"]) + @pytest.mark.vm_definitions(dict(name="vm1", 
image_test="TestManualChaining::test_create")) + def test_chain(self, create_vms): + logging.debug("TestManualChaining::test_chain") diff --git a/tests/misc/test_pool.py b/tests/misc/test_pool.py new file mode 100644 index 000000000..9597669ad --- /dev/null +++ b/tests/misc/test_pool.py @@ -0,0 +1,9 @@ +from lib import host + +# Requirements: +# From --hosts parameter: +# - host(A1): first XCP-ng host >= 8.2. +# - hostB1: Master of a second pool. + +def test_pool_join(hostA1, hostB1): + hostB1.join_pool(hostA1.pool)