From e0e15c7ba4122d8647cbb682288f02b840311d47 Mon Sep 17 00:00:00 2001
From: chunfuwen
Date: Thu, 19 Sep 2024 01:21:25 -0400
Subject: [PATCH] Add support for new image types vhd and gce in bootc image
 builder

vhd and gce are newly introduced disk image types, and are only supported
upstream.

Signed-off-by: chunfuwen
---
 .../bootc_image_build_utils.py     | 191 +++++++++++++++++-
 .../bootc_disk_image_build.cfg     |  37 +++-
 .../bootc_disk_image_install.cfg   |  27 ++-
 .../bootc_disk_image_build.py      |   2 +
 .../bootc_disk_image_install.py    |  15 +-
 5 files changed, 251 insertions(+), 21 deletions(-)

diff --git a/provider/bootc_image_builder/bootc_image_build_utils.py b/provider/bootc_image_builder/bootc_image_build_utils.py
index 5d369fbd46..faac64e61c 100644
--- a/provider/bootc_image_builder/bootc_image_build_utils.py
+++ b/provider/bootc_image_builder/bootc_image_build_utils.py
@@ -65,7 +65,10 @@ def podman_command_build(bib_image_url, disk_image_type, image_ref, config=None,
         os.makedirs("/var/lib/libvirt/images/output")
     cmd = "sudo podman run --rm -it --privileged --pull=newer --security-opt label=type:unconfined_t -v /var/lib/libvirt/images/output:/output"
     if config:
-        cmd += " -v %s:/config.json " % config
+        if "toml" in config:
+            cmd += " -v %s:/config.toml " % config
+        else:
+            cmd += " -v %s:/config.json " % config
 
     if local_container:
         cmd += " -v /var/lib/containers/storage:/var/lib/containers/storage "
@@ -83,7 +86,10 @@ def podman_command_build(bib_image_url, disk_image_type, image_ref, config=None,
            " --type %s --tls-verify=%s " % (bib_image_url, disk_image_type, tls_verify)
 
     if config:
-        cmd += " --config /config.json "
+        if "toml" in config:
+            cmd += " --config /config.toml "
+        else:
+            cmd += " --config /config.json "
 
     if target_arch:
         cmd += " --target-arch=%s " % target_arch
@@ -173,6 +179,19 @@ def create_config_json_file(params):
     password = params.get("os_password")
     kickstart = "yes" == params.get("kickstart")
     public_key_path = os.path.join(os.path.expanduser("~/.ssh/"), "id_rsa.pub")
+    filesystem_size_set = "yes" == params.get("filesystem_size_set")
+
+    filesystem_dict = {"filesystem": [
+        {
+            "mountpoint": "/",
+            "minsize": "10 GiB"
+        },
+        {
+            "mountpoint": "/var/data",
+            "minsize": "15 GiB"
+        }
+        ]
+    }
     if not os.path.exists(public_key_path):
         LOG.debug("public key doesn't exist, will help create one")
         key_gen_cmd = "ssh-keygen -q -t rsa -N '' <<< $'\ny' >/dev/null 2>&1"
@@ -222,12 +241,84 @@ def create_config_json_file(params):
         }
     }
 
+    if filesystem_size_set:
+        cfg['blueprint']['customizations'].update(filesystem_dict)
+
     LOG.debug("what is cfg:%s", cfg)
     config_json_path = pathlib.Path(folder) / "config.json"
     config_json_path.write_text(json.dumps(cfg), encoding="utf-8")
     return os.path.join(folder, "config.json")
 
 
+def create_config_toml_file(params):
+    """
+    create toml configuration file
+
+    :param params: one dictionary to pass in configuration
+    """
+    folder = params.get("config_file_path")
+    username = params.get("os_username")
+    password = params.get("os_password")
+    kickstart = "yes" == params.get("kickstart")
+    public_key_path = os.path.join(os.path.expanduser("~/.ssh/"), "id_rsa.pub")
+    filesystem_size_set = "yes" == params.get("filesystem_size_set")
+    filesystem_size_str = ""
+
+    if not os.path.exists(public_key_path):
+        LOG.debug("public key doesn't exist, will help create one")
+        key_gen_cmd = "ssh-keygen -q -t rsa -N '' <<< $'\ny' >/dev/null 2>&1"
+        process.run(key_gen_cmd, shell=True, ignore_status=False)
+
+    with open(public_key_path, 'r') as ssh:
+        key_value = ssh.read().rstrip()
+
+    if filesystem_size_set:
+        filesystem_size_str = f"""
+        [[customizations.filesystem]]
+        mountpoint = "/"
+        minsize = "10 GiB"
+
+        [[customizations.filesystem]]
+        mountpoint = "/var/data"
+        minsize = "20 GiB"
+        """
+    if not kickstart:
+        container_file_content = f"""\n
+        [[customizations.user]]
+        name = "{username}"
+        password = "{password}"
+        key = "{key_value}"
+        groups = ["wheel"]
+        {filesystem_size_str}
+        [customizations.kernel]
+        append = "mitigations=auto,nosmt"
+        """
+    else:
+        kick_start = {"contents": "user --name %s --password %s --groups wheel\n"
+                      "sshkey --username %s \"%s\"\ntext --non-interactive\nzerombr\n"
+                      "clearpart --all --initlabel --disklabel=gpt\nautopart --noswap --type=lvm\n"
+                      "network --bootproto=dhcp --device=link --activate --onboot=on\n reboot" % (username, password, username, key_value)
+                      }
+        container_file_content = f"""\n
+        [customizations.kernel]
+        append = "mitigations=auto,nosmt"
+        [customizations.installer.modules]
+        enable = [
+          "org.fedoraproject.Anaconda.Modules.Localization"
+        ]
+        disable = [
+          "org.fedoraproject.Anaconda.Modules.Users"
+        ]
+        {filesystem_size_str}
+        [customizations.installer.kickstart]
+        contents = \"""{kick_start.get("contents")}\"""
+        """
+    LOG.debug("what is container_file_content:%s", container_file_content)
+    config_toml_path = pathlib.Path(folder) / "config.toml"
+    config_toml_path.write_text(textwrap.dedent(container_file_content), encoding="utf8")
+    return os.path.join(folder, "config.toml")
+
+
 def create_auth_json_file(params):
     """
     create authentication json configuration file
@@ -279,9 +370,12 @@ def create_and_build_container_file(params):
     folder = params.get("container_base_folder")
     build_container = params.get("build_container")
     container_tag = params.get("container_url")
+    manifest = params.get("manifest")
 
     # clean up existed image
     clean_image_cmd = "sudo podman rmi %s" % container_tag
+    if manifest:
+        clean_image_cmd = "sudo podman manifest rm %s" % container_tag
     process.run(clean_image_cmd, shell=True, ignore_status=True)
     etc_config = ''
     dnf_vmware_tool = ''
@@ -308,8 +402,10 @@ def create_and_build_container_file(params):
                           "update-crypto-policies --no-reload --set FIPS "
 
     container_path = pathlib.Path(folder) / "Containerfile_tmp"
-    shutil.copy("/etc/yum.repos.d/beaker-BaseOS.repo", folder)
-    shutil.copy("/etc/yum.repos.d/beaker-AppStream.repo", folder)
+    if os.path.exists("/etc/yum.repos.d/beaker-BaseOS.repo"):
+        shutil.copy("/etc/yum.repos.d/beaker-BaseOS.repo", folder)
+    if os.path.exists("/etc/yum.repos.d/beaker-AppStream.repo"):
+        shutil.copy("/etc/yum.repos.d/beaker-AppStream.repo", folder)
 
     create_sudo_file = "RUN echo '%wheel ALL=(ALL) NOPASSWD: ALL' > /etc/sudoers.d/wheel-passwordless-sudo"
     enable_root_ssh = "RUN echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config.d/01-permitrootlogin.conf"
@@ -382,9 +478,16 @@ def create_and_build_container_file(params):
     {dnf_fips_install}
     Run dnf clean all
     """
+    build_cmd = "sudo podman build -t %s -f %s" % (container_tag, str(container_path))
+    if manifest:
+        container_file_content = f"""\n
+        FROM {build_container}
+        {create_sudo_file}
+        {enable_root_ssh}
+        """
+        build_cmd = "sudo podman build --platform linux/arm64,linux/amd64 --manifest %s -f %s" % (manifest, str(container_path))
 
     container_path.write_text(textwrap.dedent(container_file_content), encoding="utf8")
-    build_cmd = "sudo podman build -t %s -f %s" % (container_tag, str(container_path))
 
     process.run(build_cmd, shell=True, ignore_status=False)
 
@@ -774,7 +877,12 @@ def create_qemu_vm(params, env, test):
     vm = env.get_vm(vm_name)
     if vm.is_dead():
         LOG.debug("VM is dead, starting")
-        vm.start()
+        # workaround: the VM may fail to start on the first attempt on rhel10
+        try:
+            vm.start()
+        except Exception as ex:
+            LOG.debug("first attempt to start vm failed: %s, retrying", ex)
+            vm.start()
     ip_address = vm.wait_for_get_address(nic_index=0)
     params.update({"ip_address": ip_address.strip()})
     remote_vm_obj = verify_ssh_login_vm(params)
@@ -814,7 +922,9 @@ def prepare_aws_env(params):
     create_aws_secret_file(aws_secret_folder, aws_access_key_id, aws_access_key)
     aws_utils.create_aws_credentials_file(aws_access_key_id, aws_access_key)
     aws_utils.create_aws_config_file(aws_region)
-    aws_utils.install_aws_cli_tool(params)
+    vm_arch_name = params.get("vm_arch_name", "x86_64")
+    if "s390x" not in vm_arch_name:
+        aws_utils.install_aws_cli_tool(params)
 
 
 def cleanup_aws_env(params):
@@ -838,6 +948,73 @@ def cleanup_aws_ami_and_snapshot(params):
     aws_utils.delete_aws_ami_snapshot_id(params)
 
 
+def convert_vhd_to_qcow2(params):
+    """
+    Convert vhd disk format into qcow2
+
+    :param params: one dictionary wrapping various parameters
+    :return: Converted image path
+    """
+    original_image_path = params.get('vm_disk_image_path')
+    converted_image_path = original_image_path.replace("vhd", "qcow2")
+    LOG.debug(f"converted vhd to qcow2 output is : {converted_image_path}")
+
+    convert_cmd = f"qemu-img convert -p -f vpc -O qcow2 {original_image_path} {converted_image_path}"
+    process.run(convert_cmd, shell=True, verbose=True, ignore_status=False)
+    return converted_image_path
+
+
+def untar_tgz_to_raw(params):
+    """
+    extract the gce image tarball (image.tar.gz) into raw format: disk.raw
+
+    :param params: one dictionary wrapping various parameters
+    """
+    original_image_path = params.get('vm_disk_image_path')
+    tar_image_folder = os.path.dirname(original_image_path)
+    untar_image_path = os.path.join(tar_image_folder, "disk.raw")
+    LOG.debug(f"untar image.tar.gz for gce output is : {tar_image_folder}")
+
+    tar_cmd = f"tar -xvzf {original_image_path} -C {tar_image_folder}"
+    process.run(tar_cmd, shell=True, verbose=True, ignore_status=False)
+    return untar_image_path
+
+
+def check_bootc_image_version_id(params):
+    """
+    check bootc image version id
+
+    :param params: one dictionary wrapping various parameters
+    """
+    expected_redhat_version_id = params.get("redhat_version_id")
+    if expected_redhat_version_id is None:
+        LOG.debug("don't need to check redhat version id")
+    else:
+        bootc_meta_info_dict = get_bootc_image_meta_info(params)
+        redhat_version_id = bootc_meta_info_dict.get("redhat.version-id")
+        compose_id = bootc_meta_info_dict.get("redhat.compose-id")
+        expected_compose_id = "RHEL-{}".format(expected_redhat_version_id)
+        if expected_redhat_version_id != redhat_version_id:
+            raise exceptions.TestFail(f"Expected redhat version id: {expected_redhat_version_id}, actual version id is: {redhat_version_id}")
+        if expected_compose_id not in compose_id:
+            raise exceptions.TestFail(f"Expected compose id: {expected_compose_id}, actual compose id is: {compose_id}")
+
+
+def get_bootc_image_meta_info(params):
+    """
+    get bootc image meta information
+    :param params: one dictionary wrapping various parameters
+    """
+    container_url = params.get('container_url')
+    cmd = "sudo skopeo inspect --retry-times=5 --tls-verify=false docker://%s |jq -r '.Labels'" % container_url
+    ret = process.run(cmd, timeout=40, verbose=True, ignore_status=True, shell=True).stdout_text
+    LOG.debug(f"skopeo inspect bootc image output is : {ret}")
+
+    bootc_meta_info_dict = json.loads(ret)
+    return bootc_meta_info_dict
+
+
 def get_baseurl_from_repo_file(repo_file_path):
     """
     One method to get compose url from current repository file
diff --git a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg
index 5bd0b929cd..7781475df6 100644
--- a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg
+++ b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_build.cfg
@@ -1,6 +1,6 @@
 - bootc_image_builder.bib.disk_image_generation:
     type = bootc_disk_image_build
-    only x86_64, aarch64
+    only x86_64, aarch64, s390x, ppc64le
     start_vm = False
     take_regular_screendumps = "no"
     start_vm = "no"
@@ -23,10 +23,13 @@
         - use_config_json:
             os_username = "alice"
            os_password = "bob"
+            qcow..upstream_bib:
+                filesystem_size_set = "yes"
             anaconda-iso..upstream_bib..fedora_40:
                 kickstart = "yes"
             anaconda-iso..rhel_9.5_nightly_bib..local_image:
                 kickstart = "yes"
+                filesystem_size_set = "yes"
         - unuse_config_json:
     variants image_ref:
         - centos:
@@ -49,37 +52,47 @@
                 roofs = "ext4"
             qcow..upstream_bib:
                 roofs = "xfs"
+                no s390-virtio
         - fedora_latest:
             only upstream_bib..tls_verify_disable
             container_url = "quay.io/fedora/fedora-bootc:latest"
             roofs = "xfs"
             raw..upstream_bib:
                 roofs = "ext4"
+            s390-virtio:
+                container_url = "quay.io/fedora/fedora-bootc:41"
         - local_image:
             container_base_folder = "/var/lib/libvirt/images"
             container_url = "localhost/bootc:eln"
             local_container = "yes"
             build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.4"
-            rhel_9.5_nightly_bib:
-                build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.5"
+            rhel_9.5_nightly_bib, upstream_bib:
+                build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:9.5"
             rhel_10.0_bib:
-                build_container = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:rhel-10.0-beta"
+                build_container = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:10.0-beta"
         - rhel_9.4:
             build_container = "registry.redhat.io/rhel9/rhel-bootc:9.4"
             container_url = "quay.io/wenbaoxin/rhel9test"
             only rhel_9.4_bib
         - rhel_9.5_nightly:
-            container_url = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.5"
+            container_url = "registry.stage.redhat.io/rhel9/rhel-bootc:9.5"
             only rhel_9.5_nightly_bib
+            redhat_version_id = "9.5"
             no anaconda-iso
         - rhel_10.0_nightly:
-            container_url = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:rhel-10.0-beta"
+            container_url = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:10.0-beta"
             enable_tls_verify = "false"
+            redhat_version_id = "10.0-beta"
             only rhel_10.0_bib
             no anaconda-iso
         - cross_build:
-            container_url = "quay.io/centos-bootc/centos-bootc:stream9"
+            local_container = "yes"
+            manifest = "manifest-test"
+            container_base_folder = "/var/lib/libvirt/images"
+            build_container = "quay.io/centos-bootc/centos-bootc-dev:stream9"
+            container_url = "localhost/manifest-test"
             target_arch = "aarch64"
+            roofs = "btrfs"
             only qcow..upstream_bib..use_config_json..tls_verify_enable
     variants bib_ref:
         - upstream_bib:
@@ -158,3 +171,13 @@
             disk_image_type = "raw"
             output_sub_folder = "image"
             output_name = "disk.raw"
+        - vhd:
+            disk_image_type = "vhd"
+            output_sub_folder = "vpc"
+            output_name = "disk.vhd"
+            only upstream_bib
+        - gce:
+            disk_image_type = "gce"
+            output_sub_folder = "gce"
+            output_name = "image.tar.gz"
+            only upstream_bib
diff --git a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg
index 800f6f1fad..71de52ed58 100644
--- a/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg
+++ b/virttools/tests/cfg/bootc_image_builder/bootc_disk_image_install.cfg
@@ -1,6 +1,6 @@
 - bootc_image_builder.bib.disk_image_install:
     type = bootc_disk_image_install
-    only x86_64, aarch64
+    only x86_64, aarch64, s390x, ppc64le
     start_vm = False
     take_regular_screendumps = "no"
     start_vm = "no"
@@ -52,12 +52,17 @@
                 roofs = "xfs"
             anaconda-iso..upstream_bib:
                 kickstart = "yes"
+            raw..upstream_bib:
+                filesystem_size_set = "yes"
+                no s390-virtio
         - fedora_latest:
             only upstream_bib
             container_url = "quay.io/fedora/fedora-bootc:latest"
             roofs = "xfs"
             raw..upstream_bib:
                 roofs = "ext4"
+            s390-virtio:
+                container_url = "quay.io/fedora/fedora-bootc:41"
         - rhel_9.4:
             build_container = "registry.redhat.io/rhel9/rhel-bootc:9.4"
             container_url = "quay.io/wenbaoxin/rhel9test"
@@ -67,18 +72,18 @@
             container_url = "localhost/bootc:eln"
             local_container = "yes"
             build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.4"
-            rhel_9.5_nightly_bib:
-                build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.5"
+            rhel_9.5_nightly_bib, upstream_bib:
+                build_container = "registry.stage.redhat.io/rhel9/rhel-bootc:9.5"
                 fips_enable = "yes"
             rhel_10.0_bib:
-                build_container = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:rhel-10.0-beta"
+                build_container = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:10.0-beta"
         - rhel_9.5_nightly:
-            container_url = "registry.stage.redhat.io/rhel9/rhel-bootc:rhel-9.5"
+            container_url = "registry.stage.redhat.io/rhel9/rhel-bootc:9.5"
             enable_tls_verify = "false"
             only rhel_9.5_nightly_bib
             no anaconda-iso
         - rhel_10.0_nightly:
-            container_url = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:rhel-10.0-beta"
+            container_url = "registry.stage.redhat.io/rhel10-beta/rhel-bootc:10.0-beta"
             enable_tls_verify = "false"
             only rhel_10.0_bib
             no anaconda-iso
@@ -168,3 +173,13 @@
             disk_image_type = "raw"
             output_sub_folder = "image"
             output_name = "disk.raw"
+        - vhd:
+            disk_image_type = "vhd"
+            output_sub_folder = "vpc"
+            output_name = "disk.vhd"
+            only upstream_bib
+        - gce:
+            disk_image_type = "gce"
+            output_sub_folder = "gce"
+            output_name = "image.tar.gz"
+            only upstream_bib
diff --git a/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py b/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py
index 7fe6ebe5c3..b10650a640 100644
--- a/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py
+++ b/virttools/tests/src/bootc_image_builder/bootc_disk_image_build.py
@@ -37,6 +37,8 @@ def validate_bib_output(params, test):
     if formatted_group_user != ownership:
         test.fail(f"The output folder:{base_folder} has wrong setting in group and user ids: {formatted_group_user}")
 
+    bib_utils.check_bootc_image_version_id(params)
+
 
 def prepare_env_and_execute_bib(params, test):
     """
diff --git a/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py b/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py
index 419555403a..dd1e989cc5 100644
--- a/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py
+++ b/virttools/tests/src/bootc_image_builder/bootc_disk_image_install.py
@@ -45,6 +45,15 @@ def update_bib_env_info(params, test):
     params.update({'vm_disk_image_path': full_path_dest})
     params.update({'vm_name_bootc': disk_name})
 
+    if params.get("disk_image_type") == "vhd":
+        converted_image_from_vhd_qcow2 = bib_utils.convert_vhd_to_qcow2(params)
+        params.update({'vm_disk_image_path': converted_image_from_vhd_qcow2})
+
+    if params.get("disk_image_type") == "gce":
+        untar_raw_image = bib_utils.untar_tgz_to_raw(params)
+        params.update({'vm_disk_image_path': untar_raw_image})
+        cleanup_files.append(untar_raw_image)
+
     iso_install_path = os.path.join(libvirt_base_folder, f"{disk_name}_{firmware}.qcow2")
     params.update({'iso_install_path': iso_install_path})
     cleanup_files.append(iso_install_path)
@@ -128,7 +137,11 @@ def run(test, params, env):
         update_bib_env_info(params, test)
         if disk_image_type in ["vmdk"]:
             bib_utils.create_and_start_vmware_vm(params)
-        elif disk_image_type in ["qcow2", "raw", "anaconda-iso"]:
+        elif disk_image_type in ["qcow2", "raw", "anaconda-iso", "vhd", "gce"]:
+            # clean up any leftover VM with the same name before creating a new one
+            vm_name = params.get("vm_name_bootc")
+            if vm_name and vm_name in virsh.dom_list().stdout_text:
+                virsh.undefine(vm_name, options="--nvram", ignore_status=True)
             bib_utils.create_qemu_vm(params, env, test)
         elif disk_image_type in ["ami"]:
            if len(aws_config_dict) != 0:
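
For reference, a minimal usage sketch (not part of the patch) of the two new helpers this series adds. The image paths are illustrative, derived only from the cfg output layout above (output_sub_folder "vpc"/"gce" with output_name "disk.vhd"/"image.tar.gz"); any object supporting .get() can stand in for the avocado params, so a plain dict is used here:

    from provider.bootc_image_builder import bootc_image_build_utils as bib_utils

    # hypothetical output paths produced by bootc-image-builder for --type vhd / gce
    vhd_params = {"vm_disk_image_path": "/var/lib/libvirt/images/output/vpc/disk.vhd"}
    gce_params = {"vm_disk_image_path": "/var/lib/libvirt/images/output/gce/image.tar.gz"}

    # runs "qemu-img convert -p -f vpc -O qcow2 ..." and returns the qcow2 path
    qcow2_path = bib_utils.convert_vhd_to_qcow2(vhd_params)

    # extracts disk.raw next to the tarball and returns its path
    raw_path = bib_utils.untar_tgz_to_raw(gce_params)

The returned paths are what bootc_disk_image_install.py feeds back into params['vm_disk_image_path'] before booting the VM.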