From bd6645c74eca46cd086185c456bb923e60e7198a Mon Sep 17 00:00:00 2001 From: xin liang Date: Wed, 9 Oct 2024 08:48:44 +0800 Subject: [PATCH 1/3] Dev: ocfs2: Drop support for configuring ocfs2 (jsc#PED-11038) --- crmsh/bootstrap.py | 30 +--- crmsh/ocfs2.py | 346 -------------------------------------- crmsh/report/collect.py | 69 -------- crmsh/report/constants.py | 3 +- crmsh/ui_cluster.py | 16 -- 5 files changed, 3 insertions(+), 461 deletions(-) delete mode 100644 crmsh/ocfs2.py diff --git a/crmsh/bootstrap.py b/crmsh/bootstrap.py index a561567110..634c0cdd47 100644 --- a/crmsh/bootstrap.py +++ b/crmsh/bootstrap.py @@ -35,7 +35,6 @@ from . import userdir from .constants import SSH_OPTION, QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT,\ REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, CSYNC2_SERVICE, WAIT_TIMEOUT_MS_DEFAULT -from . import ocfs2 from . import qdevice from . import parallax from . import log @@ -71,7 +70,7 @@ "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SYSCONFIG_SBD, PCMK_REMOTE_AUTH, WATCHDOG_CFG, PROFILES_FILE, CRM_CFG, SBD_SYSTEMD_DELAY_START_DIR) -INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "ocfs2", "admin", "qdevice") +INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "admin", "qdevice") INIT_STAGES_INTERNAL = ("csync2_remote", "qnetd_remote", "remote_auth") INIT_STAGES_ALL = INIT_STAGES_EXTERNAL + INIT_STAGES_INTERNAL JOIN_STAGES_EXTERNAL = ("ssh", "csync2", "ssh_merge", "cluster") @@ -112,9 +111,6 @@ def __init__(self): self.qdevice_heuristics = None self.qdevice_heuristics_mode = None self.qdevice_rm_flag = None - self.ocfs2_devices = [] - self.use_cluster_lvm2 = None - self.mount_point = None self.cluster_node = None self.force = None self.arbitrator = None @@ -264,7 +260,7 @@ def _validate_stage(self): if self.type == "init": if self.stage not in INIT_STAGES_ALL: utils.fatal(f"Invalid stage: {self.stage}(available stages: {', '.join(INIT_STAGES_EXTERNAL)})") - if self.stage in ("admin", "qdevice", "ocfs2") and not self.cluster_is_running: + if self.stage in ("admin", "qdevice") and not self.cluster_is_running: utils.fatal(f"Cluster is inactive, can't run '{self.stage}' stage") if self.stage in ("corosync", "cluster") and self.cluster_is_running: utils.fatal(f"Cluster is active, can't run '{self.stage}' stage") @@ -283,8 +279,6 @@ def validate_option(self): """ if self.qdevice_inst: self.qdevice_inst.valid_qdevice_options() - if self.ocfs2_devices or self.stage == "ocfs2": - ocfs2.OCFS2Manager.verify_ocfs2(self) if not self.skip_csync2 and self.type == "init": self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC")) if self.skip_csync2 and self.stage: @@ -1402,16 +1396,6 @@ def init_sbd(): _context.sbd_manager.sbd_init() -def init_ocfs2(): - """ - OCFS2 configure process - """ - if not _context.ocfs2_devices: - return - ocfs2_manager = ocfs2.OCFS2Manager(_context) - ocfs2_manager.init_ocfs2() - - def init_cluster(): """ Initial cluster configuration. 
@@ -2228,7 +2212,6 @@ def bootstrap_init(context): init_cluster() init_admin() init_qdevice() - init_ocfs2() except lock.ClaimLockError as err: utils.fatal(err) @@ -2326,7 +2309,6 @@ def bootstrap_join(context): join_csync2(cluster_node, remote_user) join_ssh_merge(cluster_node, remote_user) probe_partitions() - join_ocfs2(cluster_node, remote_user) join_cluster(cluster_node, remote_user) except (lock.SSHError, lock.ClaimLockError) as err: utils.fatal(err) @@ -2338,14 +2320,6 @@ def bootstrap_finished(): logger.info("Done (log saved to %s on %s)", log.CRMSH_LOG_FILE, utils.this_node()) -def join_ocfs2(peer_host, peer_user): - """ - If init node configured OCFS2 device, verify that device on join node - """ - ocfs2_inst = ocfs2.OCFS2Manager(_context) - ocfs2_inst.join_ocfs2(peer_host) - - def remove_qdevice() -> None: """ Remove qdevice service and configuration from cluster diff --git a/crmsh/ocfs2.py b/crmsh/ocfs2.py deleted file mode 100644 index 346cc5c20e..0000000000 --- a/crmsh/ocfs2.py +++ /dev/null @@ -1,346 +0,0 @@ -import re -from contextlib import contextmanager -from . import utils, sh -from . import bootstrap -from . import ra -from . import corosync -from . import log -from . import xmlutil -from . import constants -from .service_manager import ServiceManager - -logger = log.setup_logger(__name__) -logger_utils = log.LoggerUtils(logger) - - -class OCFS2Manager(object): - """ - Class to manage OCFS2 and configure related resources - """ - RA_ID_PREFIX = "ocfs2-" - DLM_RA_ID = "{}dlm".format(RA_ID_PREFIX) - FS_RA_ID = "{}clusterfs".format(RA_ID_PREFIX) - LVMLOCKD_RA_ID = "{}lvmlockd".format(RA_ID_PREFIX) - LVMACTIVATE_RA_ID = "{}lvmactivate".format(RA_ID_PREFIX) - GROUP_ID = "{}group".format(RA_ID_PREFIX) - CLONE_ID = "{}clone".format(RA_ID_PREFIX) - VG_ID = "{}vg".format(RA_ID_PREFIX) - LV_ID = "{}lv".format(RA_ID_PREFIX) - - MAX_CLONE_NUM = 8 - # Note: using undocumented '-x' switch to avoid prompting if overwriting - MKFS_CMD = "mkfs.ocfs2 --cluster-stack pcmk --cluster-name {} -N {} -x {}" - HINTS_WHEN_RUNNING = """ -The cluster service has already been initialized, but the prerequisites are missing -to configure OCFS2. Please fix it and use the stage procedure to configure OCFS2 separately, -e.g. 
crm cluster init ocfs2 -o <ocfs2-device>
-    """
-
-    def __init__(self, context):
-        """
-        Init function
-        """
-        self.ocfs2_devices = context.ocfs2_devices
-        self.use_cluster_lvm2 = context.use_cluster_lvm2
-        self.mount_point = context.mount_point
-        self.use_stage = context.stage == "ocfs2"
-        self.yes_to_all = context.yes_to_all
-        self.cluster_name = None
-        self.exist_ra_id_list = []
-        self.vg_id = None
-        self.group_id = None
-        self.target_device = None
-
-    def _verify_packages(self, use_cluster_lvm2=False):
-        """
-        Find if missing required package
-        """
-        required_packages = ["ocfs2-tools"]
-        if use_cluster_lvm2:
-            required_packages.append("lvm2-lockd")
-        for pkg in required_packages:
-            if not utils.package_is_installed(pkg):
-                raise ValueError("Missing required package for configuring OCFS2: {}".format(pkg))
-
-    def _verify_options(self):
-        """
-        Verify options related with OCFS2
-        """
-        if self.use_stage and not self.ocfs2_devices:
-            raise ValueError("ocfs2 stage require -o option")
-        if len(self.ocfs2_devices) > 1 and not self.use_cluster_lvm2:
-            raise ValueError("Without Cluster LVM2 (-C option), -o option only support one device")
-        if self.use_cluster_lvm2 and not self.ocfs2_devices:
-            raise ValueError("-C option only valid together with -o option")
-        if self.mount_point and utils.has_mount_point_used(self.mount_point):
-            raise ValueError("Mount point {} already mounted".format(self.mount_point))
-
-    def _verify_devices(self):
-        """
-        Verify ocfs2 devices
-        """
-        for dev in self.ocfs2_devices:
-            if not utils.is_block_device(dev):
-                raise ValueError("{} doesn't look like a block device".format(dev))
-            if utils.is_dev_used_for_lvm(dev) and self.use_cluster_lvm2:
-                raise ValueError("{} is a Logical Volume, cannot be used with the -C option".format(dev))
-            if utils.has_disk_mounted(dev):
-                raise ValueError("{} already mounted".format(dev))
-
-    def _check_if_already_configured(self):
-        """
-        Check if ocfs2 related resource already configured
-        """
-        if not self.use_stage:
-            return
-        out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show")
-        if "fstype=ocfs2" in out:
-            logger.info("Already configured OCFS2 related resources")
-            raise utils.TerminateSubCommand
-
-    def _static_verify(self):
-        """
-        Verify before configuring on init process
-        """
-        self._verify_packages(self.use_cluster_lvm2)
-        self._check_if_already_configured()
-        self._verify_options()
-        self._verify_devices()
-
-    def _dynamic_raise_error(self, error_msg):
-        """
-        Customize error message after cluster running
-        """
-        raise ValueError(error_msg + ("" if self.use_stage else self.HINTS_WHEN_RUNNING))
-
-    def _check_sbd_and_ocfs2_dev(self):
-        """
-        Raise error when ocfs2 device is the same with sbd device
-        """
-        from . 
import sbd - if ServiceManager().service_is_enabled("sbd.service"): - sbd_device_list = sbd.SBDManager.get_sbd_device_from_config() - for dev in self.ocfs2_devices: - if dev in sbd_device_list: - self._dynamic_raise_error("{} cannot be the same with SBD device".format(dev)) - - def _confirm_to_overwrite_ocfs2_dev(self): - """ - Confirm to overwrit ocfs2 device on interactive mode - """ - for dev in self.ocfs2_devices: - msg = "" - if utils.has_dev_partitioned(dev): - msg = "Found a partition table in {}".format(dev) - else: - fs_type = utils.get_dev_fs_type(dev) - if fs_type: - msg = "{} contains a {} file system".format(dev, fs_type) - if msg and not bootstrap.confirm("{} - Proceed anyway?".format(msg)): - raise utils.TerminateSubCommand - - for dev in self.ocfs2_devices: - sh.cluster_shell().get_stdout_or_raise_error("wipefs -a {}".format(dev)) - - def _dynamic_verify(self): - """ - Verify after cluster running - """ - if not utils.has_stonith_running(): - self._dynamic_raise_error("OCFS2 requires stonith device configured and running") - - self._check_sbd_and_ocfs2_dev() - self._confirm_to_overwrite_ocfs2_dev() - - def _gen_ra_scripts(self, ra_type, kv): - """ - Generate ra scripts - Return id and scripts - """ - config_scripts = "" - kv["id"] = utils.gen_unused_id(self.exist_ra_id_list, kv["id"]) - config_scripts = ra.CONFIGURE_RA_TEMPLATE_DICT[ra_type].format(**kv) - return kv["id"], config_scripts - - def _mkfs(self, target): - """ - Creating OCFS2 filesystem for the target device - """ - with logger_utils.status_long(" Creating OCFS2 filesystem for {}".format(target)): - self.cluster_name = corosync.get_value('totem.cluster_name') - sh.cluster_shell().get_stdout_or_raise_error(self.MKFS_CMD.format(self.cluster_name, self.MAX_CLONE_NUM, target)) - - @contextmanager - def _vg_change(self): - """ - vgchange process using contextmanager - """ - shell = sh.cluster_shell() - shell.get_stdout_or_raise_error("vgchange -ay {}".format(self.vg_id)) - try: - yield - finally: - shell.get_stdout_or_raise_error("vgchange -an {}".format(self.vg_id)) - - def _create_lv(self): - """ - Create PV, VG, LV and return LV path - """ - disks_string = ' '.join(self.ocfs2_devices) - shell = sh.cluster_shell() - - # Create PV - with logger_utils.status_long(" Creating PV for {}".format(disks_string)): - shell.get_stdout_or_raise_error("pvcreate {} -y".format(disks_string)) - - # Create VG - self.vg_id = utils.gen_unused_id(utils.get_all_vg_name(), self.VG_ID) - with logger_utils.status_long(" Creating VG {}".format(self.vg_id)): - shell.get_stdout_or_raise_error("vgcreate --shared {} {} -y".format(self.vg_id, disks_string)) - - # Create LV - with logger_utils.status_long(" Creating LV {} on VG {}".format(self.LV_ID, self.vg_id)): - pe_number = utils.get_pe_number(self.vg_id) - shell.get_stdout_or_raise_error("lvcreate -l {} {} -n {} -y".format(pe_number, self.vg_id, self.LV_ID)) - - return "/dev/{}/{}".format(self.vg_id, self.LV_ID) - - def _gen_group_and_clone_scripts(self, ra_list): - """ - Generate group and clone scripts - """ - # Group - group_kv = {"id":self.GROUP_ID, "ra_string":' '.join(ra_list)} - self.group_id, group_scripts = self._gen_ra_scripts("GROUP", group_kv) - # Clone - clone_kv = {"id":self.CLONE_ID, "group_id":self.group_id} - _, clone_scripts = self._gen_ra_scripts("CLONE", clone_kv) - return group_scripts + clone_scripts - - def _gen_fs_scripts(self): - """ - Generate Filesystem scripts - """ - fs_kv = { - "id": self.FS_RA_ID, - "mnt_point": self.mount_point, - "fs_type": "ocfs2", - 
"device": self.target_device - } - return self._gen_ra_scripts("Filesystem", fs_kv) - - def _load_append_and_wait(self, scripts, res_id, msg, need_append=True): - """ - Load scripts, append to exist group and wait resource started - """ - bootstrap.crm_configure_load("update", scripts) - if need_append: - utils.append_res_to_group(self.group_id, res_id) - bootstrap.wait_for_resource(msg, res_id) - - def _config_dlm(self): - """ - Configure DLM resource - """ - config_scripts = "" - dlm_id, dlm_scripts = self._gen_ra_scripts("DLM", {"id":self.DLM_RA_ID}) - group_clone_scripts = self._gen_group_and_clone_scripts([dlm_id]) - config_scripts = dlm_scripts + group_clone_scripts - self._load_append_and_wait(config_scripts, dlm_id, " Wait for DLM({}) start".format(dlm_id), need_append=False) - - def _config_lvmlockd(self): - """ - Configure LVMLockd resource - """ - _id, _scripts = self._gen_ra_scripts("LVMLockd", {"id":self.LVMLOCKD_RA_ID}) - self._load_append_and_wait(_scripts, _id, " Wait for LVMLockd({}) start".format(_id)) - - def _config_lvmactivate(self): - """ - Configure LVMActivate resource - """ - _id, _scripts = self._gen_ra_scripts("LVMActivate", {"id": self.LVMACTIVATE_RA_ID, "vgname": self.vg_id}) - self._load_append_and_wait(_scripts, _id, " Wait for LVMActivate({}) start".format(_id)) - - def _config_fs(self): - """ - Configure Filesystem resource - """ - utils.mkdirp(self.mount_point) - _id, _scripts = self._gen_fs_scripts() - self._load_append_and_wait(_scripts, _id, " Wait for Filesystem({}) start".format(_id)) - - def _config_resource_stack_lvm2(self): - """ - Configure dlm + lvmlockd + lvm-activate + Filesystem - """ - self._config_dlm() - self._config_lvmlockd() - self.target_device = self._create_lv() - with self._vg_change(): - self._mkfs(self.target_device) - self._config_lvmactivate() - self._config_fs() - - def _config_resource_stack_ocfs2_along(self): - """ - Configure dlm + Filesystem - """ - self._config_dlm() - self.target_device = self.ocfs2_devices[0] - self._mkfs(self.target_device) - self._config_fs() - - def init_ocfs2(self): - """ - OCFS2 configure process on init node - """ - logger.info("Configuring OCFS2") - self._dynamic_verify() - self.exist_ra_id_list = utils.all_exist_id() - - no_quorum_policy_value = utils.get_property("no-quorum-policy") - if not no_quorum_policy_value or no_quorum_policy_value != "freeze": - utils.set_property("no-quorum-policy", "freeze") - logger.info(" 'no-quorum-policy' is changed to \"freeze\"") - - if self.use_cluster_lvm2: - self._config_resource_stack_lvm2() - else: - self._config_resource_stack_ocfs2_along() - logger.info(" OCFS2 device %s mounted on %s", self.target_device, self.mount_point) - - def _find_target_on_join(self, peer): - """ - Find device name from OCF Filesystem param on peer node - """ - out = sh.cluster_shell().get_stdout_or_raise_error("crm configure show", peer) - for line in out.splitlines(): - if "fstype=ocfs2" in line: - res = re.search("device=\"(.*?)\"", line) - if res: - return res.group(1) - else: - raise ValueError("Filesystem require configure device") - return None - - def join_ocfs2(self, peer): - """ - Called on join process, to verify ocfs2 environment - """ - target = self._find_target_on_join(peer) - if not target: - return - with logger_utils.status_long("Verify OCFS2 environment"): - use_cluster_lvm2 = xmlutil.CrmMonXmlParser(peer).is_resource_configured(constants.LVMLOCKD_RA) - self._verify_packages(use_cluster_lvm2) - if utils.is_dev_a_plain_raw_disk_or_partition(target, peer): - 
utils.compare_uuid_with_peer_dev([target], peer) - - @classmethod - def verify_ocfs2(cls, ctx): - """ - Verify OCFS2 related packages and environment - """ - inst = cls(ctx) - inst._static_verify() diff --git a/crmsh/report/collect.py b/crmsh/report/collect.py index 24c6c71458..c3c51d6557 100644 --- a/crmsh/report/collect.py +++ b/crmsh/report/collect.py @@ -116,75 +116,6 @@ def dump_D_process() -> str: return out_string -def lsof_ocfs2_device() -> str: - """ - List open files for OCFS2 device - """ - out_string = "" - - sh_utils_inst = ShellUtils() - _, out, _ = sh_utils_inst.get_stdout_stderr("mount") - dev_list = re.findall("^(.*) on .* type ocfs2 ", out, re.MULTILINE) - for dev in dev_list: - cmd = f"lsof {dev}" - out_string += "\n\n#=====[ Command ] ==========================#\n" - out_string += f"# {cmd}\n" - _, cmd_out, _ = sh_utils_inst.get_stdout_stderr(cmd) - if cmd_out: - out_string += cmd_out - - return out_string - - -def ocfs2_commands_output() -> str: - """ - Run ocfs2 related commands, return outputs - """ - out_string = "" - - cmds = [ - "dmesg", - "ps -efL", - "lsblk -o 'NAME,KNAME,MAJ:MIN,FSTYPE,LABEL,RO,RM,MODEL,SIZE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,MOUNTPOINT'", - "mounted.ocfs2 -f", - "findmnt", - "mount", - "cat /sys/fs/ocfs2/cluster_stack" - ] - for cmd in cmds: - cmd_name = cmd.split()[0] - if not shutil.which(cmd_name): - continue - if cmd_name == "cat" and not os.path.exists(cmd.split()[1]): - continue - out_string += "\n\n#===== [ Command ] ==========================#\n" - out_string += f"# {cmd}\n" - out_string += utils.get_cmd_output(cmd) - - return out_string - - -def collect_ocfs2_info(context: core.Context) -> None: - """ - Collects OCFS2 information - """ - out_string = "" - rc, out, err = ShellUtils().get_stdout_stderr("mounted.ocfs2 -d") - if rc != 0: - out_string += f"Failed to run \"mounted.ocfs2 -d\": {err}" - # No ocfs2 device, just header line printed - elif len(out.split('\n')) == 1: - out_string += "No ocfs2 partitions found" - else: - out_string += dump_D_process() - out_string += lsof_ocfs2_device() - out_string += ocfs2_commands_output() - - ocfs2_f = os.path.join(context.work_dir, constants.OCFS2_F) - logger.debug(f"Dump OCFS2 information into {utils.real_path(ocfs2_f)}") - crmutils.str2file(out_string, ocfs2_f) - - def collect_ratraces(context: core.Context) -> None: """ Collect ra trace file from default /var/lib/heartbeat/trace_ra and custom one diff --git a/crmsh/report/constants.py b/crmsh/report/constants.py index 7da7b36010..62ba976270 100644 --- a/crmsh/report/constants.py +++ b/crmsh/report/constants.py @@ -28,7 +28,7 @@ """ -PACKAGES = "booth cluster-glue cluster-glue-libs corosync corosync-qdevice corosync-qnetd corosync-testagents crmsh crmsh-scripts csync2 doxygen2man drbd-utils gfs2-kmp-default gfs2-utils hawk-apiserver ldirectord libcfg6 libcmap4 libcorosync_common4 libcpg4 libdlm libdlm3 libqb-tools libqb100 libquorum5 libsam4 libtotem_pg5 libvotequorum8 linstor linstor-common linstor-controller linstor-satellite monitoring-plugins-metadata o2locktop ocfs2-tools ocfs2-tools-o2cb omping pacemaker pacemaker-cli pacemaker-cts pacemaker-libs pacemaker-remote pacemaker-schemas patterns-ha pssh python-pssh python3-linstor python3-linstor-client python3-pacemaker python3-parallax resource-agents resource-agents-zfs ruby2.5-rubygem-sass-listen ruby2.5-rubygem-sass-listen-doc sbd" +PACKAGES = "booth cluster-glue cluster-glue-libs corosync corosync-qdevice corosync-qnetd corosync-testagents crmsh 
crmsh-scripts csync2 doxygen2man drbd-utils gfs2-kmp-default gfs2-utils hawk-apiserver ldirectord libcfg6 libcmap4 libcorosync_common4 libcpg4 libdlm libdlm3 libqb-tools libqb100 libquorum5 libsam4 libtotem_pg5 libvotequorum8 linstor linstor-common linstor-controller linstor-satellite monitoring-plugins-metadata omping pacemaker pacemaker-cli pacemaker-cts pacemaker-libs pacemaker-remote pacemaker-schemas patterns-ha pssh python-pssh python3-linstor python3-linstor-client python3-pacemaker python3-parallax resource-agents resource-agents-zfs ruby2.5-rubygem-sass-listen ruby2.5-rubygem-sass-listen-doc sbd"

 ANALYSIS_F = "analysis.txt"
 COREDUMP_F = "coredump_info.txt"
@@ -55,7 +55,6 @@
 PCMKCONF = "/etc/sysconfig/pacemaker"
 SYSINFO_F = "sysinfo.txt"
 SYSSTATS_F = "sysstats.txt"
-OCFS2_F = "ocfs2.txt"
 SBD_F = "sbd.txt"
 QDEVICE_F = "quorum_qdevice_qnetd.txt"
 OSRELEASE = "/etc/os-release"
diff --git a/crmsh/ui_cluster.py b/crmsh/ui_cluster.py
index d951e5f14d..8da029d8fe 100644
--- a/crmsh/ui_cluster.py
+++ b/crmsh/ui_cluster.py
@@ -325,7 +325,6 @@ def do_init(self, context, *args):
            corosync    Configure corosync
            sbd         Configure SBD (requires -s <dev>)
            cluster     Bring the cluster online
-           ocfs2       Configure OCFS2 (requires -o <dev>)  NOTE: this is a Technical Preview
            admin       Create administration virtual IP (optional)
            qdevice     Configure qdevice and qnetd
@@ -353,12 +352,6 @@
           # Setup the cluster on the current node, with QDevice
           crm cluster init --qnetd-hostname <qnetd-hostname> -y

-          # Setup the cluster on the current node, with SBD+OCFS2
-          crm cluster init -s <sbd-device> -o <ocfs2-device> -y
-
-          # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
-          crm cluster init -s <sbd-device> -o <ocfs2-device1> -o <ocfs2-device2> -C -y
-
           # Add SBD on a running cluster
           crm cluster init sbd -s <sbd-device> -y
@@ -370,9 +363,6 @@
           # Add QDevice on a running cluster
           crm cluster init qdevice --qnetd-hostname <qnetd-hostname> -y
-
-          # Add OCFS2+Cluster LVM on a running cluster
-          crm cluster init ocfs2 -o <ocfs2-device1> -o <ocfs2-device2> -C -y
           """, add_help=False, formatter_class=RawDescriptionHelpFormatter)
         parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
@@ -421,12 +411,6 @@
         storage_group = parser.add_argument_group("Storage configuration", "Options for configuring shared storage.")
         storage_group.add_argument("-s", "--sbd-device", dest="sbd_devices", metavar="DEVICE", action=CustomAppendAction, default=[],
                 help="Block device to use for SBD fencing, use \";\" as separator or -s multiple times for multi path (up to 3 devices)")
-        storage_group.add_argument("-o", "--ocfs2-device", dest="ocfs2_devices", metavar="DEVICE", action=CustomAppendAction, default=[],
-                help="Block device to use for OCFS2; When using Cluster LVM2 to manage the shared storage, user can specify one or multiple raw disks, use \";\" as separator or -o multiple times for multi path (must specify -C option) NOTE: this is a Technical Preview")
-        storage_group.add_argument("-C", "--cluster-lvm2", action="store_true", dest="use_cluster_lvm2",
-                help="Use Cluster LVM2 (only valid together with -o option) NOTE: this is a Technical Preview")
-        storage_group.add_argument("-m", "--mount-point", dest="mount_point", metavar="MOUNT", default="/srv/clusterfs",
-                help="Mount point for OCFS2 device (default is /srv/clusterfs, only valid together with -o option) NOTE: this is a Technical Preview")
         options, args = parse_options(parser, args)
         if options is None or args is None:

From 9101a1ed7499e5d4b426766fa24c682215706566 Mon Sep 17 00:00:00 2001
From: xin liang 
Date: Wed, 9 Oct 2024 09:10:31 +0800 Subject: [PATCH 2/3] Dev: unittests: Adjust unit test for previous commit --- test/unittests/test_ocfs2.py | 465 -------------------------- test/unittests/test_report_collect.py | 68 ---- 2 files changed, 533 deletions(-) delete mode 100644 test/unittests/test_ocfs2.py diff --git a/test/unittests/test_ocfs2.py b/test/unittests/test_ocfs2.py deleted file mode 100644 index 603c68d6c5..0000000000 --- a/test/unittests/test_ocfs2.py +++ /dev/null @@ -1,465 +0,0 @@ -import logging -import unittest -try: - from unittest import mock -except ImportError: - import mock -from crmsh import ocfs2, utils, ra, constants - -logging.basicConfig(level=logging.INFO) - -class TestOCFS2Manager(unittest.TestCase): - """ - Unitary tests for crmsh.bootstrap.SBDManager - """ - - @classmethod - def setUpClass(cls): - """ - Global setUp. - """ - - def setUp(self): - """ - Test setUp. - """ - context1 = mock.Mock(ocfs2_devices=[]) - self.ocfs2_inst1 = ocfs2.OCFS2Manager(context1) - - context2 = mock.Mock(ocfs2_devices=[], - stage="ocfs2", - yes_to_all=True) - self.ocfs2_inst2 = ocfs2.OCFS2Manager(context2) - - context3 = mock.Mock(ocfs2_devices=["/dev/sdb2", "/dev/sdc2"], - use_cluster_lvm2=False) - self.ocfs2_inst3 = ocfs2.OCFS2Manager(context3) - - context4 = mock.Mock(ocfs2_devices=[], - use_cluster_lvm2=True) - self.ocfs2_inst4 = ocfs2.OCFS2Manager(context4) - - context5 = mock.Mock(ocfs2_devices=["/dev/sda2", "/dev/sda2"]) - self.ocfs2_inst5 = ocfs2.OCFS2Manager(context5) - - context6 = mock.Mock(ocfs2_devices=["/dev/sda2"], - mount_point="/data") - self.ocfs2_inst6 = ocfs2.OCFS2Manager(context6) - - context7 = mock.Mock(ocfs2_devices=["/dev/sdb2"], - use_cluster_lvm2=True) - self.ocfs2_inst7 = ocfs2.OCFS2Manager(context7) - - def tearDown(self): - """ - Test tearDown. - """ - - @classmethod - def tearDownClass(cls): - """ - Global tearDown. 
- """ - - @mock.patch('crmsh.utils.package_is_installed') - def test_verify_packages(self, mock_installed): - mock_installed.side_effect = [True, False] - with self.assertRaises(ValueError) as err: - self.ocfs2_inst1._verify_packages(use_cluster_lvm2=True) - self.assertEqual("Missing required package for configuring OCFS2: lvm2-lockd", str(err.exception)) - mock_installed.assert_has_calls([ - mock.call("ocfs2-tools"), - mock.call("lvm2-lockd") - ]) - - def test_verify_options_stage_miss_option(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst2._verify_options() - self.assertEqual("ocfs2 stage require -o option", str(err.exception)) - - def test_verify_options_two_devices(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst3._verify_options() - self.assertEqual("Without Cluster LVM2 (-C option), -o option only support one device", str(err.exception)) - - def test_verify_options_only_C(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst4._verify_options() - self.assertEqual("-C option only valid together with -o option", str(err.exception)) - - @mock.patch('crmsh.utils.has_mount_point_used') - def test_verify_options_mount(self, mock_mount): - mock_mount.return_value = True - with self.assertRaises(ValueError) as err: - self.ocfs2_inst6._verify_options() - self.assertEqual("Mount point /data already mounted", str(err.exception)) - mock_mount.assert_called_once_with("/data") - - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_not_block(self, mock_is_block): - mock_is_block.return_value = False - with self.assertRaises(ValueError) as err: - self.ocfs2_inst3._verify_devices() - self.assertEqual("/dev/sdb2 doesn't look like a block device", str(err.exception)) - mock_is_block.assert_called_once_with("/dev/sdb2") - - @mock.patch('crmsh.utils.is_dev_used_for_lvm') - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_lvm(self, mock_is_block, mock_lvm): - mock_lvm.return_value = True - mock_is_block.return_value = True - with self.assertRaises(ValueError) as err: - self.ocfs2_inst7._verify_devices() - self.assertEqual("/dev/sdb2 is a Logical Volume, cannot be used with the -C option", str(err.exception)) - mock_is_block.assert_called_once_with("/dev/sdb2") - mock_lvm.assert_called_once_with("/dev/sdb2") - - @mock.patch('crmsh.utils.has_disk_mounted') - @mock.patch('crmsh.utils.is_dev_used_for_lvm') - @mock.patch('crmsh.utils.is_block_device') - def test_verify_devices_mounted(self, mock_is_block, mock_lvm, mock_mounted): - mock_lvm.return_value = False - mock_is_block.return_value = True - mock_mounted.return_value = True - with self.assertRaises(ValueError) as err: - self.ocfs2_inst7._verify_devices() - self.assertEqual("/dev/sdb2 already mounted", str(err.exception)) - mock_is_block.assert_called_once_with("/dev/sdb2") - mock_lvm.assert_called_once_with("/dev/sdb2") - mock_mounted.assert_called_once_with("/dev/sdb2") - - def test_check_if_already_configured_return(self): - self.ocfs2_inst3._check_if_already_configured() - - @mock.patch('logging.Logger.info') - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - def test_check_if_already_configured(self, mock_run, mock_info): - mock_run.return_value = "data xxx fstype=ocfs2 sss" - with self.assertRaises(utils.TerminateSubCommand): - self.ocfs2_inst2._check_if_already_configured() - mock_run.assert_called_once_with("crm configure show") - mock_info.assert_called_once_with("Already configured OCFS2 related resources") - - 
@mock.patch('crmsh.ocfs2.OCFS2Manager._verify_devices') - @mock.patch('crmsh.ocfs2.OCFS2Manager._check_if_already_configured') - @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_options') - @mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages') - def test_static_verify(self, mock_verify_packages, mock_verify_options, mock_configured, mock_verify_devices): - self.ocfs2_inst3._static_verify() - mock_verify_packages.assert_called_once_with(False) - mock_verify_options.assert_called_once_with() - mock_configured.assert_called_once_with() - mock_verify_devices.assert_called_once_with() - - def test_dynamic_raise_error(self): - with self.assertRaises(ValueError) as err: - self.ocfs2_inst2._dynamic_raise_error("error messages") - self.assertEqual("error messages", str(err.exception)) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error') - @mock.patch('crmsh.sbd.SBDManager.get_sbd_device_from_config') - @mock.patch('crmsh.service_manager.ServiceManager.service_is_enabled') - def test_check_sbd_and_ocfs2_dev(self, mock_enabled, mock_get_device, mock_error): - mock_enabled.return_value = True - mock_get_device.return_value = ["/dev/sdb2"] - self.ocfs2_inst3._check_sbd_and_ocfs2_dev() - mock_enabled.assert_called_once_with("sbd.service") - mock_get_device.assert_called_once_with() - mock_error.assert_called_once_with("/dev/sdb2 cannot be the same with SBD device") - - @mock.patch('crmsh.bootstrap.confirm') - @mock.patch('crmsh.utils.get_dev_fs_type') - @mock.patch('crmsh.utils.has_dev_partitioned') - def test_confirm_to_overwrite_ocfs2_dev(self, mock_has_parted, mock_fstype, mock_confirm): - mock_has_parted.side_effect = [True, False] - mock_fstype.return_value = "ext4" - mock_confirm.side_effect = [True, False] - with self.assertRaises(utils.TerminateSubCommand) as err: - self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev() - mock_has_parted.assert_has_calls([ - mock.call("/dev/sdb2"), - mock.call("/dev/sdc2") - ]) - mock_fstype.assert_called_once_with("/dev/sdc2") - mock_confirm.assert_has_calls([ - mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"), - mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?") - ]) - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - @mock.patch('crmsh.bootstrap.confirm') - @mock.patch('crmsh.utils.get_dev_fs_type') - @mock.patch('crmsh.utils.has_dev_partitioned') - def test_confirm_to_overwrite_ocfs2_dev_confirmed(self, mock_has_parted, mock_fstype, mock_confirm, mock_run): - mock_has_parted.side_effect = [True, False] - mock_fstype.return_value = "ext4" - mock_confirm.side_effect = [True, True] - self.ocfs2_inst3._confirm_to_overwrite_ocfs2_dev() - mock_has_parted.assert_has_calls([ - mock.call("/dev/sdb2"), - mock.call("/dev/sdc2") - ]) - mock_fstype.assert_called_once_with("/dev/sdc2") - mock_confirm.assert_has_calls([ - mock.call("Found a partition table in /dev/sdb2 - Proceed anyway?"), - mock.call("/dev/sdc2 contains a ext4 file system - Proceed anyway?") - ]) - mock_run.assert_has_calls([ - mock.call("wipefs -a /dev/sdb2"), - mock.call("wipefs -a /dev/sdc2") - ]) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_raise_error') - @mock.patch('crmsh.utils.has_stonith_running') - def test_dynamic_verify_error(self, mock_has_stonith, mock_error): - mock_has_stonith.return_value = False - mock_error.side_effect = SystemExit - with self.assertRaises(SystemExit): - self.ocfs2_inst3._dynamic_verify() - mock_has_stonith.assert_called_once_with() - mock_error.assert_called_once_with("OCFS2 requires stonith device 
configured and running") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._confirm_to_overwrite_ocfs2_dev') - @mock.patch('crmsh.ocfs2.OCFS2Manager._check_sbd_and_ocfs2_dev') - @mock.patch('crmsh.utils.has_stonith_running') - def test_dynamic_verify(self, mock_has_stonith, mock_check_dev, mock_confirm): - mock_has_stonith.return_value = True - self.ocfs2_inst3._dynamic_verify() - mock_has_stonith.assert_called_once_with() - mock_check_dev.assert_called_once_with() - mock_confirm.assert_called_once_with() - - @mock.patch('crmsh.utils.gen_unused_id') - def test_gen_ra_scripts(self, mock_gen_unused): - self.ocfs2_inst3.exist_ra_id_list = [] - mock_gen_unused.return_value = "g1" - res = self.ocfs2_inst3._gen_ra_scripts("GROUP", {"id": "g1", "ra_string": "d vip"}) - assert res == ("g1", "\ngroup g1 d vip") - mock_gen_unused.assert_called_once_with([], "g1") - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - @mock.patch('crmsh.corosync.get_value') - @mock.patch('crmsh.log.LoggerUtils.status_long') - def test_mkfs(self, mock_long, mock_get_value, mock_run): - mock_get_value.return_value = "hacluster" - self.ocfs2_inst3._mkfs("/dev/sdb2") - mock_long.assert_called_once_with(" Creating OCFS2 filesystem for /dev/sdb2") - mock_get_value.assert_called_once_with("totem.cluster_name") - mock_run.assert_called_once_with("mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 -x /dev/sdb2") - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - def test_vg_change(self, mock_run): - self.ocfs2_inst3.vg_id = "vg1" - with self.ocfs2_inst3._vg_change(): - pass - mock_run.assert_has_calls([ - mock.call("vgchange -ay vg1"), - mock.call("vgchange -an vg1") - ]) - - @mock.patch('crmsh.utils.get_pe_number') - @mock.patch('crmsh.utils.gen_unused_id') - @mock.patch('crmsh.utils.get_all_vg_name') - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - @mock.patch('crmsh.log.LoggerUtils.status_long') - def test_create_lv(self, mock_long, mock_run, mock_all_vg, mock_unused, mock_pe_num): - mock_all_vg.return_value = [] - mock_unused.return_value = "vg1" - mock_pe_num.return_value = 1234 - res = self.ocfs2_inst3._create_lv() - self.assertEqual(res, "/dev/vg1/ocfs2-lv") - mock_run.assert_has_calls([ - mock.call("pvcreate /dev/sdb2 /dev/sdc2 -y"), - mock.call("vgcreate --shared vg1 /dev/sdb2 /dev/sdc2 -y"), - mock.call("lvcreate -l 1234 vg1 -n ocfs2-lv -y") - ]) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_gen_group_and_clone_scripts(self, mock_gen): - mock_gen.side_effect = [("id1", "group_script\n"), ("id2", "clone_script\n")] - res = self.ocfs2_inst3._gen_group_and_clone_scripts(["ra1", "ra2"]) - self.assertEqual(res, "group_script\nclone_script\n") - mock_gen.assert_has_calls([ - mock.call('GROUP', {'id': 'ocfs2-group', 'ra_string': 'ra1 ra2'}), - mock.call('CLONE', {'id': 'ocfs2-clone', 'group_id': 'id1'}) - ]) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_gen_fs_scripts(self, mock_gen): - mock_gen.return_value = "scripts" - self.ocfs2_inst3.mount_point = "/data" - self.ocfs2_inst3.target_device = "/dev/sda1" - res = self.ocfs2_inst3._gen_fs_scripts() - self.assertEqual(res, "scripts") - mock_gen.assert_called_once_with("Filesystem", {'id': 'ocfs2-clusterfs', 'mnt_point': '/data', 'fs_type': 'ocfs2', 'device': '/dev/sda1'}) - - @mock.patch('crmsh.bootstrap.wait_for_resource') - @mock.patch('crmsh.utils.append_res_to_group') - @mock.patch('crmsh.bootstrap.crm_configure_load') - def test_load_append_and_wait(self, mock_load, 
mock_append, mock_wait): - self.ocfs2_inst3.group_id = "g1" - self.ocfs2_inst3._load_append_and_wait("scripts", "res_id", "messages data") - mock_load.assert_called_once_with("update", "scripts") - mock_append.assert_called_once_with("g1", "res_id") - mock_wait.assert_called_once_with("messages data", "res_id") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_group_and_clone_scripts') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_dlm(self, mock_gen_ra, mock_gen_group, mock_load_wait): - mock_gen_ra.return_value = ("dlm_id", "dlm_scripts\n") - mock_gen_group.return_value = "group_scripts\n" - self.ocfs2_inst3._config_dlm() - mock_gen_ra.assert_called_once_with("DLM", {"id": "ocfs2-dlm"}) - mock_gen_group.assert_called_once_with(["dlm_id"]) - mock_load_wait.assert_called_once_with("dlm_scripts\ngroup_scripts\n", "dlm_id", " Wait for DLM(dlm_id) start", need_append=False) - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_lvmlockd(self, mock_gen_ra, mock_load_wait): - mock_gen_ra.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3._config_lvmlockd() - mock_gen_ra.assert_called_once_with("LVMLockd", {"id": "ocfs2-lvmlockd"}) - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMLockd(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_ra_scripts') - def test_config_lvmactivate(self, mock_gen_ra, mock_load_wait): - mock_gen_ra.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3.vg_id = "vg1" - self.ocfs2_inst3._config_lvmactivate() - mock_gen_ra.assert_called_once_with("LVMActivate", {"id": "ocfs2-lvmactivate", "vgname": "vg1"}) - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for LVMActivate(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._load_append_and_wait') - @mock.patch('crmsh.ocfs2.OCFS2Manager._gen_fs_scripts') - @mock.patch('crmsh.utils.mkdirp') - def test_config_fs(self, mock_mkdir, mock_gen_fs, mock_load_wait): - mock_gen_fs.return_value = ("ra_id", "ra_scripts\n") - self.ocfs2_inst3.mount_point = "/data" - self.ocfs2_inst3._config_fs() - mock_mkdir.assert_called_once_with("/data") - mock_gen_fs.assert_called_once_with() - mock_load_wait.assert_called_once_with("ra_scripts\n", "ra_id", " Wait for Filesystem(ra_id) start") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs') - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmactivate') - @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs') - @mock.patch('crmsh.ocfs2.OCFS2Manager._vg_change') - @mock.patch('crmsh.ocfs2.OCFS2Manager._create_lv') - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_lvmlockd') - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm') - def test_config_resource_stack_lvm2(self, mock_dlm, mock_lvmlockd, mock_lv, mock_vg, mock_mkfs, mock_lvmactivate, mock_fs): - mock_lv.return_value = "/dev/sda1" - self.ocfs2_inst3._config_resource_stack_lvm2() - mock_dlm.assert_called_once_with() - mock_lvmlockd.assert_called_once_with() - mock_lv.assert_called_once_with() - mock_mkfs.assert_called_once_with("/dev/sda1") - mock_lvmactivate.assert_called_once_with() - mock_fs.assert_called_once_with() - - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_fs') - @mock.patch('crmsh.ocfs2.OCFS2Manager._mkfs') - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_dlm') - def test_config_resource_stack_ocfs2_along(self, 
mock_dlm, mock_mkfs, mock_fs): - self.ocfs2_inst3._config_resource_stack_ocfs2_along() - mock_dlm.assert_called_once_with() - mock_mkfs.assert_called_once_with("/dev/sdb2") - mock_fs.assert_called_once_with() - - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_lvm2') - @mock.patch('crmsh.utils.set_property') - @mock.patch('crmsh.utils.get_property') - @mock.patch('crmsh.utils.all_exist_id') - @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify') - @mock.patch('logging.Logger.info') - def test_init_ocfs2_lvm2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_lvm2): - mock_all_id.return_value = [] - mock_get.return_value = None - self.ocfs2_inst7.mount_point = "/data" - self.ocfs2_inst7.target_device = "/dev/vg1/lv1" - self.ocfs2_inst7.init_ocfs2() - mock_status.assert_has_calls([ - mock.call("Configuring OCFS2"), - mock.call(' \'no-quorum-policy\' is changed to "freeze"'), - mock.call(' OCFS2 device %s mounted on %s', '/dev/vg1/lv1', '/data') - ]) - mock_dynamic_verify.assert_called_once_with() - mock_all_id.assert_called_once_with() - mock_lvm2.assert_called_once_with() - - @mock.patch('crmsh.ocfs2.OCFS2Manager._config_resource_stack_ocfs2_along') - @mock.patch('crmsh.utils.set_property') - @mock.patch('crmsh.utils.get_property') - @mock.patch('crmsh.utils.all_exist_id') - @mock.patch('crmsh.ocfs2.OCFS2Manager._dynamic_verify') - @mock.patch('logging.Logger.info') - def test_init_ocfs2(self, mock_status, mock_dynamic_verify, mock_all_id, mock_get, mock_set, mock_ocfs2): - mock_all_id.return_value = [] - mock_get.return_value = None - self.ocfs2_inst3.mount_point = "/data" - self.ocfs2_inst3.target_device = "/dev/sda1" - self.ocfs2_inst3.init_ocfs2() - mock_status.assert_has_calls([ - mock.call("Configuring OCFS2"), - mock.call(' \'no-quorum-policy\' is changed to "freeze"'), - mock.call(' OCFS2 device %s mounted on %s', '/dev/sda1', '/data') - ]) - mock_dynamic_verify.assert_called_once_with() - mock_all_id.assert_called_once_with() - mock_ocfs2.assert_called_once_with() - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - def test_find_target_on_join_none(self, mock_run): - mock_run.return_value = "data" - res = self.ocfs2_inst3._find_target_on_join("node1") - assert res is None - mock_run.assert_called_once_with("crm configure show", "node1") - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - def test_find_target_on_join_exception(self, mock_run): - mock_run.return_value = """ -params directory="/srv/clusterfs" fstype=ocfs2 - """ - with self.assertRaises(ValueError) as err: - self.ocfs2_inst3._find_target_on_join("node1") - self.assertEqual("Filesystem require configure device", str(err.exception)) - mock_run.assert_called_once_with("crm configure show", "node1") - - @mock.patch('crmsh.sh.ClusterShell.get_stdout_or_raise_error') - def test_find_target_on_join(self, mock_run): - mock_run.return_value = """ -params directory="/srv/clusterfs" fstype=ocfs2 device="/dev/sda2" - """ - res = self.ocfs2_inst3._find_target_on_join("node1") - self.assertEqual(res, "/dev/sda2") - mock_run.assert_called_once_with("crm configure show", "node1") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join') - def test_join_ocfs2_return(self, mock_find): - mock_find.return_value = None - self.ocfs2_inst3.join_ocfs2("node1") - mock_find.assert_called_once_with("node1") - - @mock.patch('crmsh.utils.compare_uuid_with_peer_dev') - @mock.patch('crmsh.utils.is_dev_a_plain_raw_disk_or_partition') - 
@mock.patch('crmsh.ocfs2.OCFS2Manager._verify_packages') - @mock.patch('crmsh.xmlutil.CrmMonXmlParser') - @mock.patch('crmsh.log.LoggerUtils.status_long') - @mock.patch('crmsh.ocfs2.OCFS2Manager._find_target_on_join') - def test_join_ocfs2(self, mock_find, mock_long, mock_parser, mock_verify_packages, mock_is_mapper, mock_compare): - mock_find.return_value = "/dev/sda2" - mock_parser("node1").is_resource_configured.return_value = False - mock_is_mapper.return_value = True - self.ocfs2_inst3.join_ocfs2("node1") - mock_find.assert_called_once_with("node1") - mock_verify_packages.assert_called_once_with(False) - mock_is_mapper.assert_called_once_with("/dev/sda2", "node1") - mock_compare.assert_called_once_with(["/dev/sda2"], "node1") - - @mock.patch('crmsh.ocfs2.OCFS2Manager._static_verify') - def test_verify_ocfs2(self, mock_static_verify): - context1 = mock.Mock(ocfs2_devices=[]) - ocfs2.OCFS2Manager.verify_ocfs2(context1) - mock_static_verify.assert_called_once_with() diff --git a/test/unittests/test_report_collect.py b/test/unittests/test_report_collect.py index e13443236e..a005a46331 100644 --- a/test/unittests/test_report_collect.py +++ b/test/unittests/test_report_collect.py @@ -375,74 +375,6 @@ def test_collect_ratraces(self, mock_find, mock_mkdirp, mock_copy, mock_logger, ]) mock_logger.debug.assert_called_with(f'Dump RA trace files into {mock_real_path.return_value}') - @mock.patch('crmsh.report.collect.ShellUtils') - def test_lsof_ocfs2_device(self, mock_run): - mock_run_inst = mock.Mock() - mock_run.return_value = mock_run_inst - mount_data = """ -/dev/vda3 on /home type xfs (rw,relatime,attr2,inode64,logbufs=8,logbsize=32k,noquota) -tmpfs on /run/user/0 type tmpfs (rw,nosuid,nodev,relatime,size=169544k,nr_inodes=42386,mode=700,inode64) -/dev/sda7 on /srv/clusterfs type ocfs2 (rw,relatime,heartbeat=non - """ - mock_run_inst.get_stdout_stderr.side_effect = [(0, mount_data, None), (0, "data", None)] - res = collect.lsof_ocfs2_device() - self.assertEqual(res, "\n\n#=====[ Command ] ==========================#\n# lsof /dev/sda7\ndata") - mock_run_inst.get_stdout_stderr.assert_has_calls([ - mock.call("mount"), - mock.call("lsof /dev/sda7") - ]) - - @mock.patch('crmsh.report.utils.get_cmd_output') - @mock.patch('os.path.exists') - @mock.patch('shutil.which') - def test_ocfs2_commands_output(self, mock_which, mock_exists, mock_run): - mock_which.side_effect = [False for i in range(5)] + [True, True] - mock_exists.return_value = False - mock_run.return_value = "data" - res = collect.ocfs2_commands_output() - self.assertEqual(res, "\n\n#===== [ Command ] ==========================#\n# mount\ndata") - - @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger) - @mock.patch('crmsh.utils.str2file') - @mock.patch('crmsh.report.collect.ShellUtils') - def test_collect_ocfs2_info_error(self, mock_run, mock_str2file, mock_debug2): - mock_run_inst = mock.Mock() - mock_run.return_value = mock_run_inst - mock_run_inst.get_stdout_stderr.return_value = (1, None, "error") - mock_ctx_inst = mock.Mock(work_dir="/opt/workdir") - collect.collect_ocfs2_info(mock_ctx_inst) - mock_str2file.assert_called_once_with('Failed to run "mounted.ocfs2 -d": error', '/opt/workdir/ocfs2.txt') - - @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger) - @mock.patch('crmsh.utils.str2file') - @mock.patch('crmsh.report.collect.ShellUtils') - def test_collect_ocfs2_info_no_found(self, mock_run, mock_str2file, mock_debug2): - mock_run_inst = mock.Mock() - mock_run.return_value = mock_run_inst - 
mock_run_inst.get_stdout_stderr.return_value = (0, "data", None) - mock_ctx_inst = mock.Mock(work_dir="/opt/workdir") - collect.collect_ocfs2_info(mock_ctx_inst) - mock_str2file.assert_called_once_with('No ocfs2 partitions found', '/opt/workdir/ocfs2.txt') - - @mock.patch('crmsh.report.utils.real_path') - @mock.patch('crmsh.report.collect.ocfs2_commands_output') - @mock.patch('crmsh.report.collect.lsof_ocfs2_device') - @mock.patch('crmsh.report.collect.dump_D_process') - @mock.patch('crmsh.report.collect.logger', spec=crmsh.log.DEBUG2Logger) - @mock.patch('crmsh.utils.str2file') - @mock.patch('crmsh.report.collect.ShellUtils') - def test_collect_ocfs2_info(self, mock_run, mock_str2file, mock_debug2, mock_D, mock_lsof, mock_output, mock_real_path): - mock_real_path.return_value = constants.OCFS2_F - mock_run_inst = mock.Mock() - mock_run.return_value = mock_run_inst - mock_run_inst.get_stdout_stderr.return_value = (0, "line1\nline2", None) - mock_D.return_value = "data_D\n" - mock_lsof.return_value = "data_lsof\n" - mock_output.return_value = "data_output\n" - mock_ctx_inst = mock.Mock(work_dir="/opt/workdir") - collect.collect_ocfs2_info(mock_ctx_inst) - mock_str2file.assert_called_once_with('data_D\ndata_lsof\ndata_output\n', '/opt/workdir/ocfs2.txt') - @mock.patch('crmsh.report.utils.real_path') @mock.patch('logging.Logger.debug') @mock.patch('crmsh.utils.str2file') From 7998730dc2bfd1ce6ea6ba972f6e04c4bcdf0094 Mon Sep 17 00:00:00 2001 From: xin liang Date: Wed, 9 Oct 2024 09:14:15 +0800 Subject: [PATCH 3/3] Dev: behave: Adjust functional tests for previous commit --- data-manifest | 2 - test/features/bootstrap_options.feature | 2 +- test/features/ocfs2.feature | 61 ------------------------- test/features/steps/const.py | 24 +--------- test/run-functional-tests | 2 +- 5 files changed, 3 insertions(+), 88 deletions(-) delete mode 100644 test/features/ocfs2.feature diff --git a/data-manifest b/data-manifest index 88aa8fee06..7210be6b55 100644 --- a/data-manifest +++ b/data-manifest @@ -82,7 +82,6 @@ test/features/crm_report_normal.feature test/features/environment.py test/features/geo_setup.feature test/features/healthcheck.feature -test/features/ocfs2.feature test/features/qdevice_options.feature test/features/qdevice_setup_remove.feature test/features/qdevice_usercase.feature @@ -197,7 +196,6 @@ test/unittests/test_gv.py test/unittests/test_handles.py test/unittests/test_lock.py test/unittests/test_objset.py -test/unittests/test_ocfs2.py test/unittests/test_parallax.py test/unittests/test_parse.py test/unittests/test_prun.py diff --git a/test/features/bootstrap_options.feature b/test/features/bootstrap_options.feature index d1eb929069..8d81de6ece 100644 --- a/test/features/bootstrap_options.feature +++ b/test/features/bootstrap_options.feature @@ -42,7 +42,7 @@ Feature: crmsh bootstrap process - options @clean Scenario: Stage validation When Try "crm cluster init fdsf -y" on "hanode1" - Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, admin, qdevice)" in stderr + Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, admin, qdevice)" in stderr When Try "crm cluster join fdsf -y" on "hanode1" Then Expected "Invalid stage: fdsf(available stages: ssh, csync2, ssh_merge, cluster)" in stderr When Try "crm cluster join ssh -y" on "hanode1" diff --git a/test/features/ocfs2.feature b/test/features/ocfs2.feature deleted file mode 100644 index 96a83af844..0000000000 --- a/test/features/ocfs2.feature +++ /dev/null @@ 
-1,61 +0,0 @@ -@ocfs2 -Feature: OCFS2 configuration/verify using bootstrap - -@clean -Scenario: Configure ocfs2 along with init process - Given Has disk "/dev/sda1" on "hanode1" - And Has disk "/dev/sda2" on "hanode1" - When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -y" on "hanode1" - Then Cluster service is "started" on "hanode1" - And Service "sbd" is "started" on "hanode1" - And Resource "stonith-sbd" type "fence_sbd" is "Started" - And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started" - And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started" - -@clean -Scenario: Configure cluster lvm2 + ocfs2 with init process - Given Has disk "/dev/sda1" on "hanode1" - And Has disk "/dev/sda2" on "hanode1" - And Has disk "/dev/sda3" on "hanode1" - When Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -o /dev/sda3 -C -y" on "hanode1" - Then Cluster service is "started" on "hanode1" - And Service "sbd" is "started" on "hanode1" - And Resource "stonith-sbd" type "fence_sbd" is "Started" - And Resource "ocfs2-dlm" type "pacemaker:controld" is "Started" - And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started" - And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started" - And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started" - -@clean -Scenario: Add ocfs2 alone on a running cluster - Given Has disk "/dev/sda1" on "hanode1" - And Has disk "/dev/sda2" on "hanode1" - And Has disk "/dev/sda1" on "hanode2" - And Has disk "/dev/sda2" on "hanode2" - When Run "crm cluster init -s /dev/sda1 -y" on "hanode1" - And Run "crm cluster join -c hanode1 -y" on "hanode2" - Then Online nodes are "hanode1 hanode2" - And Service "sbd" is "started" on "hanode1" - And Service "sbd" is "started" on "hanode2" - And Resource "stonith-sbd" type "fence_sbd" is "Started" - When Run "crm cluster init ocfs2 -o /dev/sda2 -y" on "hanode1" - Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started" - And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started" - -@clean -Scenario: Add cluster lvm2 + ocfs2 on a running cluster - Given Has disk "/dev/sda1" on "hanode1" - And Has disk "/dev/sda2" on "hanode1" - And Has disk "/dev/sda1" on "hanode2" - And Has disk "/dev/sda2" on "hanode2" - When Run "crm cluster init -s /dev/sda1 -y" on "hanode1" - And Run "crm cluster join -c hanode1 -y" on "hanode2" - Then Online nodes are "hanode1 hanode2" - And Service "sbd" is "started" on "hanode1" - And Service "sbd" is "started" on "hanode2" - And Resource "stonith-sbd" type "fence_sbd" is "Started" - When Run "crm cluster init ocfs2 -o /dev/sda2 -C -y" on "hanode1" - Then Resource "ocfs2-dlm" type "pacemaker:controld" is "Started" - And Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started" - And Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started" - And Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started" diff --git a/test/features/steps/const.py b/test/features/steps/const.py index 8962aa6da4..08bac0c797 100644 --- a/test/features/steps/const.py +++ b/test/features/steps/const.py @@ -134,18 +134,6 @@ Block device to use for SBD fencing, use ";" as separator or -s multiple times for multi path (up to 3 devices) - -o DEVICE, --ocfs2-device DEVICE - Block device to use for OCFS2; When using Cluster LVM2 - to manage the shared storage, user can specify one or - multiple raw disks, use ";" as separator or -o - multiple times for multi path (must specify -C option) - NOTE: this is a Technical Preview - -C, 
--cluster-lvm2        Use Cluster LVM2 (only valid together with -o option)
-                        NOTE: this is a Technical Preview
-  -m MOUNT, --mount-point MOUNT
-                        Mount point for OCFS2 device (default is
-                        /srv/clusterfs, only valid together with -o option)
-                        NOTE: this is a Technical Preview

 Stage can be one of:
     ssh         Create SSH keys for passwordless SSH between cluster nodes
@@ -153,7 +141,6 @@
     corosync    Configure corosync
     sbd         Configure SBD (requires -s <dev>)
     cluster     Bring the cluster online
-    ocfs2       Configure OCFS2 (requires -o <dev>)  NOTE: this is a Technical Preview
     admin       Create administration virtual IP (optional)
     qdevice     Configure qdevice and qnetd
@@ -181,12 +168,6 @@
   # Setup the cluster on the current node, with QDevice
   crm cluster init --qnetd-hostname <qnetd-hostname> -y

-  # Setup the cluster on the current node, with SBD+OCFS2
-  crm cluster init -s <sbd-device> -o <ocfs2-device> -y
-
-  # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
-  crm cluster init -s <sbd-device> -o <ocfs2-device1> -o <ocfs2-device2> -C -y
-
   # Add SBD on a running cluster
   crm cluster init sbd -s <sbd-device> -y
@@ -197,10 +178,7 @@
   crm cluster init sbd -S -y

   # Add QDevice on a running cluster
-  crm cluster init qdevice --qnetd-hostname <qnetd-hostname> -y
-
-  # Add OCFS2+Cluster LVM on a running cluster
-  crm cluster init ocfs2 -o <ocfs2-device1> -o <ocfs2-device2> -C -y'''
+  crm cluster init qdevice --qnetd-hostname <qnetd-hostname> -y'''

 CRM_CLUSTER_JOIN_H_OUTPUT = '''Join existing cluster
diff --git a/test/run-functional-tests b/test/run-functional-tests
index 37afc158af..b083298df9 100755
--- a/test/run-functional-tests
+++ b/test/run-functional-tests
@@ -14,7 +14,7 @@ HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
 HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
 HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
 BEHAVE_CASE_DIR="$(dirname $0)/features/"
-BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+BEHAVE_CASE_EXCLUDE="sbd"
 read -r -d '' SSHD_CONFIG_AZURE << EOM
 PermitRootLogin no
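
With the ocfs2 stage and the -o/-C/-m options gone, bootstrap no longer generates the DLM + Filesystem resource stack. For reference, a minimal manual sketch of what the removed code used to configure for a plain shared disk; the device /dev/sdb1 and the cluster name hacluster are illustrative assumptions, while the resource IDs, the /srv/clusterfs default mount point, the -N 8 slot count, and the no-quorum-policy=freeze setting come from the deleted crmsh/ocfs2.py:

    # Format the device for the pacemaker cluster stack; the cluster name must
    # match totem.cluster_name (the removed code also passed the undocumented
    # -x switch to skip the overwrite prompt)
    mkfs.ocfs2 --cluster-stack pcmk --cluster-name hacluster -N 8 /dev/sdb1

    # Recreate the dlm + Filesystem stack as a cloned group
    crm configure primitive ocfs2-dlm ocf:pacemaker:controld
    crm configure primitive ocfs2-clusterfs ocf:heartbeat:Filesystem \
        params device="/dev/sdb1" directory="/srv/clusterfs" fstype=ocfs2
    crm configure group ocfs2-group ocfs2-dlm ocfs2-clusterfs
    crm configure clone ocfs2-clone ocfs2-group
    crm configure property no-quorum-policy=freeze

As in the removed _dynamic_verify(), a configured and running stonith device remains a prerequisite for running OCFS2 on the cluster.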