From 39587b42166d1d4caf3a80bce3457c8f682cd392 Mon Sep 17 00:00:00 2001 From: julpark Date: Tue, 14 Jan 2025 11:37:38 -0800 Subject: [PATCH] Merging volume mgmt test cases Signed-off-by: julpark --- .../tier-2_cephfs_test-volume-management.yaml | 12 - .../tier-2_file-dir-lay_vol-mgmt_nfs.yaml | 6 + .../tier-2_cephfs_test-volume-management.yaml | 12 - .../tier-2_file-dir-lay_vol-mgmt_nfs.yaml | 6 + ...-2_cephfs_test-volume-management_arch.yaml | 18 - .../tier-2_file-dir-lay_vol-mgmt_nfs.yaml | 12 +- tests/cephfs/cephfs_utilsV1.py | 37 + ...ephfs_vol_mgmt_rename_earmark_subvolume.py | 677 ++++++++++++++++++ .../cephfs_vol_mgmt_subvol_group_scenarios.py | 300 -------- ...hfs_vol_mgmt_subvol_idempotence_earmark.py | 279 -------- .../cephfs_vol_mgmt_volume_scenarios.py | 216 ------ 11 files changed, 732 insertions(+), 843 deletions(-) create mode 100644 tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_rename_earmark_subvolume.py delete mode 100644 tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py delete mode 100644 tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_idempotence_earmark.py delete mode 100644 tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_volume_scenarios.py diff --git a/suites/quincy/cephfs/tier-2_cephfs_test-volume-management.yaml b/suites/quincy/cephfs/tier-2_cephfs_test-volume-management.yaml index 8401082d2..2cefce2a0 100644 --- a/suites/quincy/cephfs/tier-2_cephfs_test-volume-management.yaml +++ b/suites/quincy/cephfs/tier-2_cephfs_test-volume-management.yaml @@ -343,15 +343,3 @@ tests: polarion-id: CEPH-11333 desc: File system life cycle abort-on-fail: false - - test: - name: volume related scenarios(delete,rename) - module: cephfs_vol_management.cephfs_vol_mgmt_volume_scenarios.py - polarion-id: CEPH-83603354 - desc: volume related scenarios(delete,rename) - abort-on-fail: false - - test: - name: cephfs subvolumegroup scenarios - module: cephfs_vol_management.cephfs_vol_mgmt_subvolgroup_scenarios.py - polarion-id: CEPH-83604079 - desc: cephfs subvolumegroup scenarios - abort-on-fail: false diff --git a/suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml b/suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml index ca1ae6c5f..340b9c294 100644 --- a/suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml +++ b/suites/quincy/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml @@ -482,3 +482,9 @@ tests: polarion-id: CEPH-83604097 desc: Basic info validation after volume creation and deletion abort-on-fail: true + - test: + name: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + module: cephfs_vol_management.cephfs_vol_mgmt_rename_earmark_subvolume.py + polarion-id: CEPH-83604978 + desc: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + abort-on-fail: false diff --git a/suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml b/suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml index ba9178948..474cebe09 100644 --- a/suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml +++ b/suites/reef/cephfs/tier-2_cephfs_test-volume-management.yaml @@ -340,15 +340,3 @@ tests: polarion-id: CEPH-11333 desc: File system life cycle abort-on-fail: false - - test: - name: volume related scenarios(delete,rename) - module: cephfs_vol_management.cephfs_vol_mgmt_volume_scenarios.py - polarion-id: CEPH-83603354 - desc: volume related scenarios(delete,rename) - abort-on-fail: false - - test: - name: cephfs subvolumegroup scenarios - module: 
cephfs_vol_management.cephfs_vol_mgmt_subvolgroup_scenarios.py - polarion-id: CEPH-83604079 - desc: cephfs subvolumegroup scenarios - abort-on-fail: false diff --git a/suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml b/suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml index ca1ae6c5f..340b9c294 100644 --- a/suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml +++ b/suites/reef/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml @@ -482,3 +482,9 @@ tests: polarion-id: CEPH-83604097 desc: Basic info validation after volume creation and deletion abort-on-fail: true + - test: + name: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + module: cephfs_vol_management.cephfs_vol_mgmt_rename_earmark_subvolume.py + polarion-id: CEPH-83604978 + desc: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + abort-on-fail: false diff --git a/suites/squid/cephfs/tier-2_cephfs_test-volume-management_arch.yaml b/suites/squid/cephfs/tier-2_cephfs_test-volume-management_arch.yaml index 7f41334ba..474cebe09 100644 --- a/suites/squid/cephfs/tier-2_cephfs_test-volume-management_arch.yaml +++ b/suites/squid/cephfs/tier-2_cephfs_test-volume-management_arch.yaml @@ -340,21 +340,3 @@ tests: polarion-id: CEPH-11333 desc: File system life cycle abort-on-fail: false - - test: - name: volume related scenarios(delete,rename) - module: cephfs_vol_management.cephfs_vol_mgmt_volume_scenarios.py - polarion-id: CEPH-83603354 - desc: volume related scenarios(delete,rename) - abort-on-fail: false - - test: - name: cephfs subvolumegroup scenarios - module: cephfs_vol_management.cephfs_vol_mgmt_subvolgroup_scenarios.py - polarion-id: CEPH-83604079 - desc: cephfs subvolumegroup scenarios - abort-on-fail: false - - test: - name: cephfs subvolume idempoence earmark - module: cephfs_vol_management.cephfs_vol_mgmt_subvol_idempotence_earmark.py - polarion-id: CEPH-83604184 - desc: cephfs subvolume idempoence earmark - abort-on-fail: false diff --git a/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml b/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml index e2465b896..340b9c294 100644 --- a/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml +++ b/suites/squid/cephfs/tier-2_file-dir-lay_vol-mgmt_nfs.yaml @@ -476,15 +476,15 @@ tests: module: cephfs_nfs.nfs_multiple_export_using_single_conf.py name: "nfs_multiple_export_using_single_conf" polarion-id: "CEPH-83575082" - - test: - name: cephfs subvolume idempoence earmark - module: cephfs_vol_management.cephfs_vol_mgmt_subvol_idempotence_earmark.py - polarion-id: CEPH-83604184 - desc: cephfs subvolume idempoence earmark - abort-on-fail: false - test: name: Basic info validation after volume creation and deletion module: cephfs_vol_management.cephfs_vol_mgmt_test_volume.py polarion-id: CEPH-83604097 desc: Basic info validation after volume creation and deletion abort-on-fail: true + - test: + name: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + module: cephfs_vol_management.cephfs_vol_mgmt_rename_earmark_subvolume.py + polarion-id: CEPH-83604978 + desc: volume rename, subvolume earmark, subvolumegroup idempotence scenarios + abort-on-fail: false diff --git a/tests/cephfs/cephfs_utilsV1.py b/tests/cephfs/cephfs_utilsV1.py index ed68aac62..495b62f07 100644 --- a/tests/cephfs/cephfs_utilsV1.py +++ b/tests/cephfs/cephfs_utilsV1.py @@ -5670,3 +5670,40 @@ def validate_dicts(self, dicts, keys_to_check): log.error(f"Key '{key}' mismatch: Values = {values}") return False return True + + def 
rename_volume(self, client, old_name, new_name): + log.info(f"[Fail {old_name} before renaming it]") + client.exec_command( + sudo=True, cmd=f"ceph fs fail {old_name} --yes-i-really-mean-it" + ) + log.info("[Set refuse_client_session to true]") + client.exec_command( + sudo=True, cmd=f"ceph fs set {old_name} refuse_client_session true" + ) + log.info("[Rename the volume]") + rename_cmd = f"ceph fs rename {old_name} {new_name} --yes-i-really-mean-it" + out, ec = client.exec_command(sudo=True, cmd=rename_cmd) + if "renamed." not in ec: + log.error(ec) + log.error(f"Failed to rename the volume: {out}") + return 1 + out, ec = client.exec_command(sudo=True, cmd=f"ceph fs ls") + if new_name not in out: + log.error(f"Volume not renamed: {out}") + return 1 + log.info(f"Volume renamed successfully: {out}") + log.info("Put it back to previous state") + client.exec_command( + sudo=True, cmd=f"ceph fs set {new_name} refuse_client_session false" + ) + client.exec_command(sudo=True, cmd=f"ceph fs set {new_name} joinable true") + timer = 10 + while timer > 0: + out, ec = client.exec_command(sudo=True, cmd=f"ceph fs status {new_name}") + if "active" in out: + break + time.sleep(5) + timer -= 1 + log.info(f"Volume {new_name} is active now") + log.info("Renaming and verification of volume successful") + return 0 diff --git a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_rename_earmark_subvolume.py b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_rename_earmark_subvolume.py new file mode 100644 index 000000000..3f0fc8043 --- /dev/null +++ b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_rename_earmark_subvolume.py @@ -0,0 +1,677 @@ +import random +import time +import traceback + +from ceph.parallel import parallel +from tests.cephfs.cephfs_utilsV1 import FsUtils +from tests.io.fs_io import fs_io +from utility.log import Log + +log = Log(__name__) +""" +ceph fs volume rename [--yes-i-really-mean-it] +# Delete Scenarios +1. Create a volume +2. Try to delete the volume wihh mon_allow_pool_delete to false +3. Check volume delete fails and validate the error message +4. Set mon_allow_pool_delete to true +5. Delete the volume and check if the volume is deleted +# Rename scenarios +6. Try to rename the volume without refuse_client_session flag +7. Try to rename the volume without fail the volume +8. Try to rename the volume with additional pools +9. Try to rename the volume with subvolumegroup and subvolume +10. Rename the volume while IO is running in the mounted directory +# subvolume earmark and subvolume group idempotence +11 .Creating subvolumes with desired uid, gid, modes, with and without groups +12. Creating subvolumes with invalid size (negative and 0) +13. Creating subvolumes with --group_name=_nogroup +14. Setting and getting earmarks during subvolume creation and on existing subvolumes +15. Setting earmarks with group_name +16. 
Setting and removing earmarks while IO is in progress +""" + + +def run(ceph_cluster, **kw): + try: + tc = "CEPH-83604978" + log.info(f"Running CephFS tests for - {tc}") + # Initialize the utility class for CephFS + fs_util = FsUtils(ceph_cluster) + # Get the client nodes + clients = ceph_cluster.get_ceph_objects("client") + config = kw.get("config") + # Authenticate the clients + fs_util.auth_list(clients) + build = config.get("build", config.get("rhbuild")) + # Prepare the clients + fs_util.prepare_clients(clients, build) + client1 = clients[0] + ran_string = "".join(random.choices("abcdefghijklmnopqrstuvwxyz", k=5)) + start_volume = f"cephfs1_{ran_string}" + fs_util.create_fs(client1, start_volume) + log.info(client1.exec_command(sudo=True, cmd="ceph fs ls")) + volume_name_list = [] + # Try to delete the volume with mon_allow_pool_delete to false + log.info( + "\n" + "\n---------------***************-------------------------------------------------------" + "\n Scenario 1: Volume Delete scenarios" + "\n---------------***************-------------------------------------------------------" + "\n" + ) + client1.exec_command( + sudo=True, cmd="ceph config set mon mon_allow_pool_delete false" + ) + delete_result, delete_ec = client1.exec_command( + sudo=True, + cmd=f"ceph fs volume rm {start_volume} --yes-i-really-mean-it", + check_ec=False, + ) + if delete_result == 0: + log.error( + "Volume deletetion should not succeed when mon_allow_pool_delete is false" + ) + return 1 + log.info("Volume deletion failed as expected") + # Set mon_allow_pool_delete to true + client1.exec_command( + sudo=True, cmd="ceph config set mon mon_allow_pool_delete true" + ) + # Delete the volume and check if the volume is deleted + delete_result2, delete_ec2 = client1.exec_command( + sudo=True, cmd=f"ceph fs volume rm {start_volume} --yes-i-really-mean-it" + ) + if delete_ec2 == 0: + log.error("Volume deletion failed") + return 1 + else: + log.info("Volume deletion successful") + # Rename the volume + log.info( + "\n" + "\n---------------***************-------------------------------------------------------" + "\n Scenario 2: Volume rename scenarios" + "\n---------------***************-------------------------------------------------------" + "\n" + ) + fs_name = f"cephfs1_{ran_string}" + client1.exec_command(sudo=True, cmd=f"ceph fs volume create {fs_name}") + mds_nodes = ceph_cluster.get_nodes("mds") + host_list = [node.hostname for node in mds_nodes] + hosts = " ".join(host_list) + client1.exec_command( + sudo=True, + cmd=f"ceph orch apply mds {fs_name} --placement='3 {hosts}'", + check_ec=False, + ) + fuse_mounting_dir_1 = f"/mnt/cephfs_fuse_{ran_string}" + fs_util.fuse_mount( + [client1], fuse_mounting_dir_1, extra_params=f" --client_fs {fs_name}" + ) + # fill the cluster up to 50 + cephfs = { + "fill_data": 50, + "io_tool": "smallfile", + "mount": "fuse", + "filesystem": fs_name, + "mount_dir": f"{fuse_mounting_dir_1}", + } + # fill up to 50% of the cluster + fs_io(client=clients[0], fs_config=cephfs, fs_util=fs_util) + volume_name_list.append(f"cephfs1_{ran_string}") + client1.exec_command( + sudo=True, + cmd=f"ceph fs volume rename cephfs1_{ran_string} cephfs2_{ran_string} --yes-i-really-mean-it", + check_ec=False, + ) + # check if the volume is renamed with active volume and without refuse_client flag + rename_result, rename_ec = client1.exec_command(sudo=True, cmd="ceph fs ls") + if f"cephfs2_{ran_string}" in rename_result: + log.error( + "Volume rename should not succeed when the volume is active " + 
"and refuse_client_session is false" + ) + return 1 + else: + log.info("Volume rename successful") + # Rename the volume only when the volume is down + client1.exec_command(sudo=True, cmd=f"ceph fs fail cephfs1_{ran_string}") + client1.exec_command( + sudo=True, + cmd=f"ceph fs volume rename cephfs1_{ran_string} cephfs2_{ran_string} --yes-i-really-mean-it", + check_ec=False, + ) + rename_result, rename_ec = client1.exec_command(sudo=True, cmd="ceph fs ls") + if f"cephfs2_{ran_string}" in rename_result: + log.error( + "[Volume rename should not succeed without refuse_client_session flag true]" + ) + return 1 + else: + log.info( + "Volume rename did not go through as expected without refuse_client_session flag true" + ) + log.info( + "[Rename the volume with refuse_client_session flag and volume is down]" + ) + client1.exec_command( + sudo=True, + cmd=f"ceph fs set cephfs1_{ran_string} refuse_client_session true", + ) + result = fs_util.rename_volume( + client1, f"cephfs1_{ran_string}", f"cephfs2_{ran_string}" + ) + if result == 1: + log.error("Volume rename failed") + return 1 + else: + volume_name_list.append(f"cephfs2_{ran_string}") + log.info("Volume rename successful") + + # Rename the volume with additional pools + pool_name = f"cephfs2_data_pool_{ran_string}" + client1.exec_command(sudo=True, cmd=f"ceph osd pool create {pool_name} 32 32") + client1.exec_command( + sudo=True, cmd=f"ceph fs add_data_pool cephfs2_{ran_string} {pool_name}" + ) + result = fs_util.rename_volume( + client1, f"cephfs2_{ran_string}", f"cephfs3_{ran_string}" + ) + if result == 1: + volume_name_list.append(f"cephfs2_{ran_string}") + log.error("Volume rename failed") + return 1 + else: + volume_name_list.append(f"cephfs3_{ran_string}") + log.info("Volume rename successful with additional pools") + # Rename the volume with subvolumegroup and subvolume + group_name = f"cephfs3_subvolumegroup_{ran_string}" + substart_volume = f"cephfs3_subvolume_{ran_string}" + fs_util.create_subvolumegroup( + client1, f"cephfs3_{ran_string}", group_name=group_name + ) + time.sleep(5) + client1.exec_command( + sudo=True, + cmd=f"ceph fs subvolume create cephfs3_{ran_string} {substart_volume} --group_name {group_name}", + ) + volume_name_list.append(f"cephfs3_{ran_string}") + result = fs_util.rename_volume( + client1, f"cephfs3_{ran_string}", f"cephfs4_{ran_string}" + ) + if result == 1: + log.error("Volume rename failed") + return 1 + else: + volume_name_list.append(f"cephfs4_{ran_string}") + log.info("Volume rename successful with subvolumegroup and subvolume") + log.info("[Rename the volume while IO is going on in the mounted directory]") + with parallel() as p: + p.spawn(fs_util.run_ios(client1, fuse_mounting_dir_1, ["dd", "smallfile"])) + result = fs_util.rename_volume( + client1, f"cephfs4_{ran_string}", f"cephfs5_{ran_string}" + ) + if result == 1: + log.error("Volume rename failed") + return 1 + else: + volume_name_list.append(f"cephfs5_{ran_string}") + log.info("Volume rename successful with IOs") + log.info("[Rename scenario is successful]") + log.info( + "\n" + "\n---------------***************-------------------------------------------------------" + "\n Scenario 3: Subvolumegroup create scenarios" + "\n---------------***************-------------------------------------------------------" + "\n" + ) + group_name = f"invalid_layout_group_{ran_string}" + fs_name = f"cephfs5_{ran_string}" + command = f"ceph fs subvolumegroup create {fs_name} {group_name} --pool_layout invalid" + log.info(f"Executing command: {command}") + 
out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error EINVAL" not in err: + log.error( + f"Subvolumegroup creation with invalid pool layout should have failed: {out}" + ) + return 1 + log.info("Subvolumegroup creation with invalid pool layout failed as expected") + # Create a subvolumegroup with valid pool layout + pool_name = f"cephfs.data.{fs_name}" + client1.exec_command(sudo=True, cmd=f"ceph osd pool create {pool_name} 16 16") + client1.exec_command( + sudo=True, cmd=f"ceph fs add_data_pool {fs_name} {pool_name}" + ) + group_name = f"valid_layout_group_{ran_string}" + command = f"ceph fs subvolumegroup create {fs_name} {group_name} --pool_layout {pool_name}" + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error EINVAL" in err: + log.error(f"Subvolumegroup creation with valid pool layout failed: {out}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup info {fs_name} {group_name}" + ) + if "Error" in err: + log.error(f"Subvolumegroup creation with valid pool layout failed: {err}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup ls {fs_name}" + ) + if group_name not in out: + log.error(f"Subvolumegroup creation with valid pool layout failed: {err}") + return 1 + log.info("Subvolumegroup creation with valid pool layout successful") + # Create a subvolumegroup with desired mode + modes_to_test = [ + "700", # owner: rwx + "750", # owner: rwx, group: r-x + "755", # owner: rwx, group: r-x, others: r-x + "640", # owner: rw-, group: r--, others: --- + "644", # owner: rw-, group: r--, others: r-- + "600", # owner: rw-, group: ---, others: --- + ] + for mode in modes_to_test: + group_name = f"mode_{mode}_group_{ran_string}" + command = ( + f"ceph fs subvolumegroup create {fs_name} {group_name} --mode {mode}" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup creation with mode {mode} failed: {err}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup getpath {fs_name} {group_name}" + ) + ls_out, ls_err = client1.exec_command( + sudo=True, cmd=f"ls -ld {fuse_mounting_dir_1}/" + ) + log.info(f"ls_out: {ls_out}") + stat_out, stat_err = client1.exec_command( + sudo=True, cmd=f"stat {fuse_mounting_dir_1}{out}" + ) + if f"0{mode}" not in stat_out: + log.error(f"Subvolumegroup creation with mode {mode} failed: {err}") + return 1 + log.info(f"Subvolumegroup creation with mode {mode} successful") + client1.exec_command( + sudo=True, + cmd=f"ceph fs subvolumegroup rm {fs_name} {group_name} --force", + ) + # Create a subvolumegroup with invalid mode + group_name = f"invalid_mode_group_{ran_string}" + command = f"ceph fs subvolumegroup create {fs_name} {group_name} --mode abcd" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error EINVAL" not in (err or out): + log.error( + f"Subvolumegroup creation with invalid mode should have failed: {out}" + ) + return 1 + log.info("Subvolumegroup creation with invalid mode failed as expected") + # Create a subvolumegroup with desired UID/GID + group_name = f"desired_uid_gid_group_{ran_string}" + command = f"ceph fs subvolumegroup create {fs_name} {group_name} --uid 1000 --gid 1000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup 
creation with desired UID/GID failed: {err}") + return 1 + log.info("Subvolumegroup creation with desired UID/GID successful") + + # Create a subvolumegroup with invalid UID/GID + group_name = f"invalid_uid_gid_group_{ran_string}" + command = ( + f"ceph fs subvolumegroup create {fs_name} {group_name} --uid -1 --gid abcd" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error EINVAL" not in err: + log.error( + f"Subvolumegroup creation with invalid UID/GID should have failed: {out}" + ) + return 1 + log.info("Subvolumegroup creation with invalid UID/GID failed as expected") + + # Create a subvolumegroup with specific size + group_name = f"specific_size_group_{ran_string}" + command = f"ceph fs subvolumegroup create {fs_name} {group_name} --size 10000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup creation with specific size failed: {err}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup exist {fs_name}" + ) + if "no subvolumegroup" in out: + log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + log.info("Subvolumegroup creation with specific size successful") + # Resize the subvolumegroup increasing the size + command = f"ceph fs subvolumegroup resize {fs_name} {group_name} 40000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup resize (increasing size) failed: {err}") + return 1 + log.info("Subvolumegroup resize (increasing size) successful") + + # Resize the subvolumegroup decreasing the size + command = f"ceph fs subvolumegroup resize {fs_name} {group_name} 10000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup resize (decreasing size) failed: {err}") + return 1 + log.info("Subvolumegroup resize (decreasing size) successful") + # Create a subvolumegroup with various options + group_name = f"various_options_group_{ran_string}" + command = ( + f"ceph fs subvolumegroup create {fs_name} {group_name} --mode 750" + f" --uid 1001 --gid 1001 --size 1000000" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup exist {fs_name}" + ) + if "no subvolumegroup" in out: + log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + out, err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup getpath {fs_name} {group_name}" + ) + # verify the result with stat + stat_out, stat_err = client1.exec_command( + sudo=True, cmd=f"stat {fuse_mounting_dir_1}{out}" + ) + if "1001" not in stat_out: + log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + log.info("Subvolumegroup creation with various options successful") + info_out, info_err = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolumegroup info {fs_name} {group_name}" + ) + if "1000000" not in info_out: + log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + log.info("Subvolumegroup creation with various options successful") + if "rwxr-x---" not in stat_out: + 
log.error(f"Subvolumegroup creation with various options failed: {err}") + return 1 + log.info(f"Subvolumegroup options verification successful for {group_name}") + subfs_name = f"subvol1_{ran_string}" + # Idempotence default - same command 2-3 times + for _ in range(3): + command = f"ceph fs subvolume create {fs_name} {subfs_name}" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolume creation failed: {err}") + return 1 + log.info("Subvolume creation idempotence (default) successful") + # Idempotence with modes, uid, gid - multiple retries + subfs_name = f"subvol2_{ran_string}" + for _ in range(3): + command = f"ceph fs subvolume create {fs_name} {subfs_name} --mode 755 --uid 1000 --gid 1000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolume creation failed: {err}") + return 1 + log.info("Subvolume creation idempotence (with modes, uid, gid) successful") + # With isolated_namespace + subfs_name = f"subvol3_{ran_string}" + command = ( + f"ceph fs subvolume create {fs_name} {subfs_name} --namespace-isolated" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolume creation with isolated_namespace failed: {err}") + return 1 + log.info("Subvolume creation with isolated_namespace successful") + # Failure in creation should cleanup incomplete subvolumes + subfs_name = f"subvol4_{ran_string}" + command = f"ceph fs subvolume create {fs_name} {subfs_name} --invalid_option" # Simulate failure + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error" not in err: + log.error(f"Subvolume creation should have failed: {out}") + return 1 + command = f"ceph fs subvolume ls {fs_name}" + out, err = client1.exec_command(sudo=True, cmd=command) + if subfs_name in out: + log.error(f"Incomplete subvolume {subfs_name} was not cleaned up") + return 1 + log.info("Subvolume creation failure and cleanup successful") + # Desired uid, gid, modes, with and without groups + subfs_name = f"subvol5_{ran_string}" + command = f"ceph fs subvolume create {fs_name} {subfs_name} --mode 755 --uid 1000 --gid 1000" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error(f"Subvolume creation with uid, gid, modes failed: {err}") + return 1 + subfs_name = f"subvol6_{ran_string}" + subvol_group_name = f"mygroup_{ran_string}" + fs_util.create_subvolumegroup(client1, fs_name, subvol_group_name) + command = ( + f"ceph fs subvolume create {fs_name} {subfs_name}" + f" --mode 700 --uid 1001 --gid 1001 --group_name {subvol_group_name}" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command) + if "Error" in err: + log.error( + f"Subvolume creation with uid, gid, modes, and group failed: {err}" + ) + return 1 + log.info( + "Subvolume creation with desired uid, gid, modes, and group successful" + ) + # Invalid Size [Negative, 0] + subfs_name = f"subvol7_{ran_string}" + command = f"ceph fs subvolume create {fs_name} {subfs_name} --size -1" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error EINVAL" not in err: + log.error(f"Subvolume creation with invalid size should have failed: {out}") + 
return 1 + subfs_name = f"subvol8_{ran_string}" + command = f"ceph fs subvolume create {fs_name} {subfs_name} --size 0" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error EINVAL" in err: + log.error( + f"Subvolume creation with invalid size [0] should have failed: {out}" + ) + return 1 + log.info("Subvolume creation with invalid size failed as expected") + # With --group_name=_nogroup + subfs_name = f"subvol9_{ran_string}" + command = ( + f"ceph fs subvolume create {fs_name} {subfs_name} --group_name=_nogroup" + ) + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Error" not in err: + log.error( + f"Subvolume creation with --group_name=_nogroup should have failed: {err}" + ) + return 1 + log.info( + "Subvolume creation --group_name=_nogroup(reserved keyword) failed as expected" + ) + # ceph version cheeck, earmark is only valid from squid + version, _ = client1.exec_command(sudo=True, cmd="ceph version") + if "squid" in version: + log.info( + "\n" + "\n---------------***************-------------------------------------------------------" + "\n Scenario 4: Subvolume earmark scenarios" + "\n---------------***************-------------------------------------------------------" + "\n" + ) + # set earmark [tag] while subvolume creation + subfs_name = f"subvol10_{ran_string}" + fs_util.create_subvolume(client1, fs_name, subfs_name, earmark="nfs.share1") + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "nfs.share1" not in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + # https://bugzilla.redhat.com/show_bug.cgi?id=2332723 + # create subvolume with earmark with group_name + # subfs_name = f"subvol11_{ran_string}" + # fs_util.create_subvolume(client1, fs_name, subfs_name, + # group_name=subvol_group_name, earmark="smb.share2") + # # Get earmark + # out,err=fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + # if "smb.share2" not in out: + # log.error(f"Subvolume get earmark failed: {err}") + # return 1 + # Create a subvolume and set earmark + subfs_name = f"subvol12_{ran_string}" + fs_util.create_subvolume(client1, fs_name, subfs_name) + # Set earmark + fs_util.set_subvolume_earmark( + client1, fs_name, subfs_name, earmark="nfs.share3" + ) + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "nfs.share3" not in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + # create a subvolume and set earmark with invalid name (other than nfs/smb) + subfs_name = f"subvol13_{ran_string}" + fs_util.create_subvolume(client1, fs_name, subfs_name) + # Set earmark + command = f"ceph fs subvolume earmark set {fs_name} {subfs_name} --earmark invalid.share" + log.info(f"Executing command: {command}") + out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) + if "Invalid earmark" not in err: + log.error( + f"Subvolume set earmark with invalid name should have failed: {out}" + ) + return 1 + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "invalid.share" in out: + log.error(f"Invalid earmark should have failed: {out}") + return 1 + # create a subvolume and set earmark and remove the earmark + subfs_name = f"subvol14_{ran_string}" + fs_util.create_subvolume(client1, fs_name, subfs_name) + # Set earmark + fs_util.set_subvolume_earmark( + client1, fs_name, subfs_name, earmark="nfs.share4" + ) + 
# Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "nfs.share4" not in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + # Remove earmark + fs_util.remove_subvolume_earmark(client1, fs_name, subfs_name) + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "nfs.share4" in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + # Set earmark and rm earmark while IO is in progress + subfs_name = f"subvol15_{ran_string}" + fs_util.create_subvolume(client1, fs_name, subfs_name) + # run IOs in the subvolume + sub_path, _ = client1.exec_command( + sudo=True, cmd=f"ceph fs subvolume getpath {fs_name} {subfs_name}" + ) + with parallel() as p: + p.spawn(fs_util.run_ios(client1, f"{fuse_mounting_dir_1}{sub_path}")) + # Set earmark + fs_util.set_subvolume_earmark( + client1, fs_name, subfs_name, earmark="nfs.share5" + ) + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + log.info(f"Get earmark output: {out}") + if "nfs.share5" not in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + # Remove earmark + fs_util.remove_subvolume_earmark(client1, fs_name, subfs_name) + # Get earmark + out = fs_util.get_subvolume_earmark(client1, fs_name, subfs_name) + if "nfs.share5" in out: + log.error(f"Subvolume get earmark failed: {err}") + return 1 + log.info("Subvolume earmark operations successful") + return 0 + + return 0 + except Exception as e: + log.error(e) + log.error(traceback.format_exc()) + return 1 + finally: + log.info( + "\n" + "\n---------------***************-------------------------------------------------------" + "\n Cleaning Up" + "\n---------------***************-------------------------------------------------------" + "\n" + ) + client1.exec_command( + sudo=True, cmd=f"umount {fuse_mounting_dir_1}", check_ec=False + ) + client1.exec_command( + sudo=True, cmd=f"rm -rf {fuse_mounting_dir_1}", check_ec=False + ) + # Clean up the subvolumegroups and volume + for i in range(1, 15): + substart_volume = f"subvol{i}_{ran_string}" + client1.exec_command( + sudo=True, + cmd=f"ceph fs subvolume rm {fs_name} {substart_volume} --force || true", + check_ec=False, + ) + fs_name = f"cephfs5_{ran_string}" + subvol_group_name = f"mygroup_{ran_string}" + subvolume_name = f"subvol15_{ran_string}" + # client1.exec_command( + # sudo=True, + # cmd=f"ceph fs subvolume rm {fs_name} subvol6_{ran_string} --group_name mygroup_{ran_string}", + # ) + client1.exec_command( + sudo=True, cmd=f"ceph fs subvolume rm {fs_name} {subvolume_name} " + ) + group_rm_list = [ + f"cephfs3_subvolumegroup_{ran_string}", + f"mygroup_{ran_string}", + f"invalid_layout_group_{ran_string}", + f"valid_layout_group_{ran_string}", + f"desired_mode_group_{ran_string}", + f"invalid_mode_group_{ran_string}", + f"desired_uid_gid_group_{ran_string}", + f"invalid_uid_gid_group_{ran_string}", + f"specific_size_group_{ran_string}", + f"various_options_group_{ran_string}", + ] + for group_name in group_rm_list: + client1.exec_command( + sudo=True, + cmd=f"ceph fs subvolumegroup rm {fs_name} {group_name} --force || true", + ) + client1.exec_command( + sudo=True, cmd="ceph config set mon mon_allow_pool_delete true" + ) + fs_util.remove_fs(client1, fs_name) + client1.exec_command( + sudo=True, + cmd=f"ceph fs subvolumegroup rm {fs_name}_{ran_string} {subvol_group_name} --force || true", + ) diff --git a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py 
b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py deleted file mode 100644 index 929fc1316..000000000 --- a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_group_scenarios.py +++ /dev/null @@ -1,300 +0,0 @@ -import random -import string -import traceback - -from tests.cephfs.cephfs_utilsV1 import FsUtils -from utility.log import Log - -log = Log(__name__) - - -def run(ceph_cluster, **kw): - """ - CephFS Subvolumegroup Test - - This test suite validates the functionality of the `ceph fs subvolumegroup` command, - covering various scenarios including: - - 1. Creating a subvolumegroup with invalid and valid pool layout flags - 2. Creating a subvolumegroup with desired and invalid modes - 3. Creating a subvolumegroup with desired and invalid UID/GID - 4. Creating and resizing a subvolumegroup with specific size - 5. Creating a subvolumegroup with various options (mode, uid, gid, size) - - """ - try: - tc = "CEPH-83604079" - log.info(f"Running CephFS tests for - {tc}") - fs_util = FsUtils(ceph_cluster) - clients = ceph_cluster.get_ceph_objects("client") - config = kw.get("config") - fs_util.auth_list(clients) - build = config.get("build", config.get("rhbuild")) - fs_util.prepare_clients(clients, build) - client1 = clients[0] - vol_name = f"cephfs_{random.randint(0, 1000)}" - fs_util.create_fs(client1, vol_name) - mount_dir = "".join( - random.choice(string.ascii_lowercase + string.digits) - for _ in list(range(5)) - ) - fuse_mounting_dir = f"/mnt/cephfs_fuse_{mount_dir}/" - fs_util.fuse_mount( - [client1], fuse_mounting_dir, extra_params=f" --client_fs {vol_name}" - ) - random_string = "".join(random.choices("abcdefghijklmnopqrstuvwxyz", k=5)) - # Create a subvolumegroup with invalid pool layout - group_name = f"invalid_layout_group_{random_string}" - command = f"ceph fs subvolumegroup create {vol_name} {group_name} --pool_layout invalid" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error EINVAL" not in err: - log.error( - f"Subvolumegroup creation with invalid pool layout should have failed: {out}" - ) - return 1 - log.info("Subvolumegroup creation with invalid pool layout failed as expected") - # Create a subvolumegroup with valid pool layout - pool_name = f"cephfs.data.{vol_name}" - client1.exec_command( - sudo=True, cmd=f"ceph osd pool create {pool_name} 128 128" - ) - client1.exec_command( - sudo=True, cmd=f"ceph fs add_data_pool {vol_name} {pool_name}" - ) - group_name = f"valid_layout_group_{random_string}" - command = f"ceph fs subvolumegroup create {vol_name} {group_name} --pool_layout {pool_name}" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error EINVAL" in err: - log.error(f"Subvolumegroup creation with valid pool layout failed: {out}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup info {vol_name} {group_name}" - ) - if "Error" in err: - log.error(f"Subvolumegroup creation with valid pool layout failed: {err}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup ls {vol_name}" - ) - if group_name not in out: - log.error(f"Subvolumegroup creation with valid pool layout failed: {err}") - return 1 - log.info("Subvolumegroup creation with valid pool layout successful") - # Create a subvolumegroup with desired mode - modes_to_test = [ - "700", # owner: rwx - "750", # owner: rwx, group: r-x - "755", # owner: rwx, group: r-x, others: 
r-x - "640", # owner: rw-, group: r--, others: --- - "644", # owner: rw-, group: r--, others: r-- - "600", # owner: rw-, group: ---, others: --- - ] - for mode in modes_to_test: - group_name = f"mode_{mode}_group_{random_string}" - command = ( - f"ceph fs subvolumegroup create {vol_name} {group_name} --mode {mode}" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup creation with mode {mode} failed: {err}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup getpath {vol_name} {group_name}" - ) - ls_out, ls_err = client1.exec_command( - sudo=True, cmd=f"ls -ld {fuse_mounting_dir}/" - ) - log.info(f"ls_out: {ls_out}") - stat_out, stat_err = client1.exec_command( - sudo=True, cmd=f"stat {fuse_mounting_dir}{out}" - ) - if f"0{mode}" not in stat_out: - log.error(f"Subvolumegroup creation with mode {mode} failed: {err}") - return 1 - log.info(f"Subvolumegroup creation with mode {mode} successful") - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} {group_name} --force", - ) - # Create a subvolumegroup with invalid mode - group_name = f"invalid_mode_group_{random_string}" - command = f"ceph fs subvolumegroup create {vol_name} {group_name} --mode abcd" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error EINVAL" not in (err or out): - log.error( - f"Subvolumegroup creation with invalid mode should have failed: {out}" - ) - return 1 - log.info("Subvolumegroup creation with invalid mode failed as expected") - # Create a subvolumegroup with desired UID/GID - group_name = f"desired_uid_gid_group_{random_string}" - command = f"ceph fs subvolumegroup create {vol_name} {group_name} --uid 1000 --gid 1000" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup creation with desired UID/GID failed: {err}") - return 1 - log.info("Subvolumegroup creation with desired UID/GID successful") - - # Create a subvolumegroup with invalid UID/GID - group_name = f"invalid_uid_gid_group_{random_string}" - command = ( - f"ceph fs subvolumegroup create {vol_name} {group_name} --uid -1 --gid abcd" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error EINVAL" not in err: - log.error( - f"Subvolumegroup creation with invalid UID/GID should have failed: {out}" - ) - return 1 - log.info("Subvolumegroup creation with invalid UID/GID failed as expected") - - # Create a subvolumegroup with specific size - group_name = f"specific_size_group_{random_string}" - command = f"ceph fs subvolumegroup create {vol_name} {group_name} --size 10000" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup creation with specific size failed: {err}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup exist {vol_name}" - ) - if "no subvolumegroup" in out: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - log.info("Subvolumegroup creation with specific size successful") - # Resize the subvolumegroup increasing the size - command = f"ceph fs subvolumegroup resize {vol_name} {group_name} 40000" - log.info(f"Executing command: {command}") - out, err = 
client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup resize (increasing size) failed: {err}") - return 1 - log.info("Subvolumegroup resize (increasing size) successful") - - # Resize the subvolumegroup decreasing the size - command = f"ceph fs subvolumegroup resize {vol_name} {group_name} 10000" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup resize (decreasing size) failed: {err}") - return 1 - log.info("Subvolumegroup resize (decreasing size) successful") - # Create a subvolumegroup with various options - group_name = f"various_options_group_{random_string}" - command = ( - f"ceph fs subvolumegroup create {vol_name} {group_name} --mode 750" - f" --uid 1001 --gid 1001 --size 1000000" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup exist {vol_name}" - ) - if "no subvolumegroup" in out: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup getpath {vol_name} {group_name}" - ) - # verify the result with stat - stat_out, stat_err = client1.exec_command( - sudo=True, cmd=f"stat {fuse_mounting_dir}{out}" - ) - if "1001" not in stat_out: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - log.info("Subvolumegroup creation with various options successful") - info_out, info_err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup info {vol_name} {group_name}" - ) - if "1000000" not in info_out: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - log.info("Subvolumegroup creation with various options successful") - if "rwxr-x---" not in stat_out: - log.error(f"Subvolumegroup creation with various options failed: {err}") - return 1 - log.info(f"Subvolumegroup options verification successful for {group_name}") - - return 0 - - except Exception as e: - log.error(e) - log.error(traceback.format_exc()) - return 1 - - finally: - log.info("Cleaning up") - # unmount the fuse mount - client1.exec_command( - sudo=True, cmd=f"umount {fuse_mounting_dir}", check_ec=False - ) - client1.exec_command( - sudo=True, cmd=f"rm -rf {fuse_mounting_dir}", check_ec=False - ) - # Clean up the subvolumegroups and volume - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"invalid_layout_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"valid_layout_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"desired_mode_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"invalid_mode_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"desired_uid_gid_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"invalid_uid_gid_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm 
{vol_name} " - f"specific_size_group_{random_string} --force || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph fs subvolumegroup rm {vol_name} " - f"various_options_group_{random_string} --force || true", - ) - out, err = client1.exec_command( - sudo=True, cmd=f"ceph fs subvolumegroup exist {vol_name}" - ) - log.info(f"Subvolumegroup cleanup: {out}") - if "no subvolumegroup" not in out: - log.error(f"Subvolumegroup cleanup failed: {err}") - return 1 - client1.exec_command( - sudo=True, - cmd=f"ceph fs volume rm {vol_name} --yes-i-really-mean-it || true", - ) - client1.exec_command( - sudo=True, - cmd=f"ceph osd pool rm {pool_name} {pool_name} --yes-i-really-mean-it || true", - ) diff --git a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_idempotence_earmark.py b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_idempotence_earmark.py deleted file mode 100644 index 3cb05ab5b..000000000 --- a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_subvol_idempotence_earmark.py +++ /dev/null @@ -1,279 +0,0 @@ -import random -import traceback - -from ceph.parallel import parallel -from tests.cephfs.cephfs_utilsV1 import FsUtils -from utility.log import Log - -log = Log(__name__) - - -def run(ceph_cluster, **kw): - """ - CephFS Subvolume Test - - This test suite validates the functionality of the `ceph fs subvolume` command, - covering various scenarios including: - - * Idempotence with and without parameters (modes, uid, gid) - * Creating subvolumes with isolated namespace - * Failure handling and cleanup of incomplete subvolumes - * Creating subvolumes with desired uid, gid, modes, with and without groups - * Creating subvolumes with invalid size (negative and 0) - * Creating subvolumes with --group_name=_nogroup - * Setting and getting earmarks during subvolume creation and on existing subvolumes - * Setting earmarks with group_name - * Setting and removing earmarks while IO is in progress - """ - try: - tc = "CEPH-83604184" - log.info(f"Running CephFS tests for - {tc}") - fs_util = FsUtils(ceph_cluster) - clients = ceph_cluster.get_ceph_objects("client") - config = kw.get("config") - fs_util.auth_list(clients) - build = config.get("build", config.get("rhbuild")) - fs_util.prepare_clients(clients, build) - client1 = clients[0] - vol_name = f"cephfs_subvolume_test_{random.randint(0, 1000)}" - fs_util.create_fs(client1, vol_name) - random_str = "".join(random.choices("abcdefghijklmnopqrstuvwxyz", k=5)) - fuse_mounting_dir_1 = f"/mnt/cephfs_fuse_{random_str}" - fs_util.fuse_mount( - [client1], fuse_mounting_dir_1, extra_params=f" --client_fs {vol_name}" - ) - subvol_name = f"subvol1_{random_str}" - # Idempotence default - same command 2-3 times - for _ in range(3): - command = f"ceph fs subvolume create {vol_name} {subvol_name}" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolume creation failed: {err}") - return 1 - log.info("Subvolume creation idempotence (default) successful") - # Idempotence with modes, uid, gid - multiple retries - subvol_name = f"subvol2_{random_str}" - for _ in range(3): - command = f"ceph fs subvolume create {vol_name} {subvol_name} --mode 755 --uid 1000 --gid 1000" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolume creation failed: {err}") - return 1 - log.info("Subvolume creation idempotence (with modes, uid, gid) successful") - # With 
isolated_namespace - subvol_name = f"subvol3_{random_str}" - command = ( - f"ceph fs subvolume create {vol_name} {subvol_name} --namespace-isolated" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolume creation with isolated_namespace failed: {err}") - return 1 - log.info("Subvolume creation with isolated_namespace successful") - # Failure in creation should cleanup incomplete subvolumes - subvol_name = f"subvol4_{random_str}" - command = f"ceph fs subvolume create {vol_name} {subvol_name} --invalid_option" # Simulate failure - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error" not in err: - log.error(f"Subvolume creation should have failed: {out}") - return 1 - command = f"ceph fs subvolume ls {vol_name}" - out, err = client1.exec_command(sudo=True, cmd=command) - if subvol_name in out: - log.error(f"Incomplete subvolume {subvol_name} was not cleaned up") - return 1 - log.info("Subvolume creation failure and cleanup successful") - # Desired uid, gid, modes, with and without groups - subvol_name = f"subvol5_{random_str}" - command = f"ceph fs subvolume create {vol_name} {subvol_name} --mode 755 --uid 1000 --gid 1000" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error(f"Subvolume creation with uid, gid, modes failed: {err}") - return 1 - subvol_name = f"subvol6_{random_str}" - subvol_group_name = f"mygroup_{random_str}" - fs_util.create_subvolumegroup(client1, vol_name, subvol_group_name) - command = ( - f"ceph fs subvolume create {vol_name} {subvol_name}" - f" --mode 700 --uid 1001 --gid 1001 --group_name {subvol_group_name}" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command) - if "Error" in err: - log.error( - f"Subvolume creation with uid, gid, modes, and group failed: {err}" - ) - return 1 - log.info( - "Subvolume creation with desired uid, gid, modes, and group successful" - ) - # Invalid Size [Negative, 0] - subvol_name = f"subvol7_{random_str}" - command = f"ceph fs subvolume create {vol_name} {subvol_name} --size -1" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error EINVAL" not in err: - log.error(f"Subvolume creation with invalid size should have failed: {out}") - return 1 - subvol_name = f"subvol8_{random_str}" - command = f"ceph fs subvolume create {vol_name} {subvol_name} --size 0" - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error EINVAL" in err: - log.error( - f"Subvolume creation with invalid size [0] should have failed: {out}" - ) - return 1 - log.info("Subvolume creation with invalid size failed as expected") - # With --group_name=_nogroup - subvol_name = f"subvol9_{random_str}" - command = ( - f"ceph fs subvolume create {vol_name} {subvol_name} --group_name=_nogroup" - ) - log.info(f"Executing command: {command}") - out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False) - if "Error" not in err: - log.error( - f"Subvolume creation with --group_name=_nogroup should have failed: {err}" - ) - return 1 - log.info( - "Subvolume creation --group_name=_nogroup(reserved keyword) failed as expected" - ) - # ceph version cheeck, earmark is only valid from squid - version, _ = 
client1.exec_command(sudo=True, cmd="ceph version")
-        if "squid" in version:
-            # set earmark [tag] while subvolume creation
-            subvol_name = f"subvol10_{random_str}"
-            fs_util.create_subvolume(
-                client1, vol_name, subvol_name, earmark="nfs.share1"
-            )
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "nfs.share1" not in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            # https://bugzilla.redhat.com/show_bug.cgi?id=2332723
-            # create subvolume with earmark with group_name
-            # subvol_name = f"subvol11_{random_str}"
-            # fs_util.create_subvolume(client1, vol_name, subvol_name,
-            # group_name=subvol_group_name, earmark="smb.share2")
-            # # Get earmark
-            # out,err=fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            # if "smb.share2" not in out:
-            # log.error(f"Subvolume get earmark failed: {err}")
-            # return 1
-            # Create a subvolume and set earmark
-            subvol_name = f"subvol12_{random_str}"
-            fs_util.create_subvolume(client1, vol_name, subvol_name)
-            # Set earmark
-            fs_util.set_subvolume_earmark(
-                client1, vol_name, subvol_name, earmark="nfs.share3"
-            )
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "nfs.share3" not in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            # create a subvolume and set earmark with invalid name (other than nfs/smb)
-            subvol_name = f"subvol13_{random_str}"
-            fs_util.create_subvolume(client1, vol_name, subvol_name)
-            # Set earmark
-            command = f"ceph fs subvolume earmark set {vol_name} {subvol_name} --earmark invalid.share"
-            log.info(f"Executing command: {command}")
-            out, err = client1.exec_command(sudo=True, cmd=command, check_ec=False)
-            if "Invalid earmark" not in err:
-                log.error(
-                    f"Subvolume set earmark with invalid name should have failed: {out}"
-                )
-                return 1
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "invalid.share" in out:
-                log.error(f"Invalid earmark should have failed: {out}")
-                return 1
-            # create a subvolume and set earmark and remove the earmark
-            subvol_name = f"subvol14_{random_str}"
-            fs_util.create_subvolume(client1, vol_name, subvol_name)
-            # Set earmark
-            fs_util.set_subvolume_earmark(
-                client1, vol_name, subvol_name, earmark="nfs.share4"
-            )
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "nfs.share4" not in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            # Remove earmark
-            fs_util.remove_subvolume_earmark(client1, vol_name, subvol_name)
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "nfs.share4" in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            # Set earmark and rm earmark while IO is in progress
-            subvol_name = f"subvol15_{random_str}"
-            fs_util.create_subvolume(client1, vol_name, subvol_name)
-            # run IOs in the subvolume
-            sub_path, _ = client1.exec_command(
-                sudo=True, cmd=f"ceph fs subvolume getpath {vol_name} {subvol_name}"
-            )
-            with parallel() as p:
-                p.spawn(
-                    fs_util.run_ios(
-                        client1, f"{fuse_mounting_dir_1}{sub_path}", ["dd", "smallfile"]
-                    )
-                )
-            # Set earmark
-            fs_util.set_subvolume_earmark(
-                client1, vol_name, subvol_name, earmark="nfs.share5"
-            )
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            log.info(f"Get earmark output: {out}")
-            if "nfs.share5" not in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            # Remove earmark
-            fs_util.remove_subvolume_earmark(client1, vol_name, subvol_name)
-            # Get earmark
-            out = fs_util.get_subvolume_earmark(client1, vol_name, subvol_name)
-            if "nfs.share5" in out:
-                log.error(f"Subvolume get earmark failed: {err}")
-                return 1
-            log.info("Subvolume earmark operations successful")
-            return 0
-
-        return 0
-
-    except Exception as e:
-        log.error(e)
-        log.error(traceback.format_exc())
-        return 1
-
-    finally:
-        log.info("Cleaning up")
-        fs_util.client_clean_up(
-            "umount", fuse_clients=[clients[0]], mounting_dir=fuse_mounting_dir_1
-        )
-        # Clean up the subvolumes and volume
-        for i in range(1, 15):
-            subvol_name = f"subvol{i}_{random_str}"
-            client1.exec_command(
-                sudo=True,
-                cmd=f"ceph fs subvolume rm {vol_name}_{random_str} {subvol_name} --force || true",
-            )
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rm {vol_name} --yes-i-really-mean-it || true",
-        )
-        # remove the group
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs subvolumegroup rm {vol_name}_{random_str} {subvol_group_name} --force || true",
-        )
diff --git a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_volume_scenarios.py b/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_volume_scenarios.py
deleted file mode 100644
index 150e3298c..000000000
--- a/tests/cephfs/cephfs_vol_management/cephfs_vol_mgmt_volume_scenarios.py
+++ /dev/null
@@ -1,216 +0,0 @@
-import random
-import time
-import traceback
-
-from ceph.parallel import parallel
-from tests.cephfs.cephfs_utilsV1 import FsUtils
-from tests.io.fs_io import fs_io
-from utility.log import Log
-
-log = Log(__name__)
-"""
-ceph fs volume rename <vol_name> <new_vol_name> [--yes-i-really-mean-it]
-# Delete Scenarios
-1. Create a volume
-2. Try to delete the volume wihh mon_allow_pool_delete to false
-3. Check volume delete fails and validate the error message
-4. Set mon_allow_pool_delete to true
-5. Delete the volume and check if the volume is deleted
-# Rename scenarios
-6. Try to rename the volume
-7. Try to rename the volume with additional pools
-8. Try to rename the volume with subvolumegroup and subvolume
-9. Rename the volume when the volume is down -> fail fs
-10. Rename the volume with and without refuse_client_session flags
-
-"""
-
-
-def run(ceph_cluster, **kw):
-    try:
-        tc = "CEPH-83603354"
-        log.info(f"Running CephFS tests for - {tc}")
-        # Initialize the utility class for CephFS
-        fs_util = FsUtils(ceph_cluster)
-        # Get the client nodes
-        clients = ceph_cluster.get_ceph_objects("client")
-        config = kw.get("config")
-        # Authenticate the clients
-        fs_util.auth_list(clients)
-        build = config.get("build", config.get("rhbuild"))
-        # Prepare the clients
-        fs_util.prepare_clients(clients, build)
-        client1 = clients[0]
-        ran_string = "".join(random.choices("abcdefghijklmnopqrstuvwxyz", k=5))
-        start_volume = f"cephfs1_{ran_string}"
-        fs_util.create_fs(client1, start_volume)
-        log.info(client1.exec_command(sudo=True, cmd="ceph fs ls"))
-        volume_name_list = []
-        # Try to delete the volume with mon_allow_pool_delete to false
-        client1.exec_command(
-            sudo=True, cmd="ceph config set mon mon_allow_pool_delete false"
-        )
-        delete_result, delete_ec = client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rm {start_volume} --yes-i-really-mean-it",
-            check_ec=False,
-        )
-        if delete_result == 0:
-            log.error(
-                "Volume deletetion should not succeed when mon_allow_pool_delete is false"
-            )
-            return 1
-        log.info("Volume deletion failed as expected")
-        # Set mon_allow_pool_delete to true
-        client1.exec_command(
-            sudo=True, cmd="ceph config set mon mon_allow_pool_delete true"
-        )
-        # Delete the volume and check if the volume is deleted
-        delete_result2, delete_ec2 = client1.exec_command(
-            sudo=True, cmd=f"ceph fs volume rm {start_volume} --yes-i-really-mean-it"
-        )
-        if delete_ec2 == 0:
-            log.error("Volume deletion failed")
-            return 1
-        else:
-            log.info("Volume deletion successful")
-        # Rename the volume
-        fs_name = f"cephfs1_{ran_string}"
-        client1.exec_command(sudo=True, cmd=f"ceph fs volume create {fs_name}")
-        fuse_mounting_dir_1 = f"/mnt/cephfs_fuse_{ran_string}"
-        fs_util.fuse_mount(
-            [client1], fuse_mounting_dir_1, extra_params=f" --client_fs {fs_name}"
-        )
-        # fill the cluster up to 50
-        cephfs = {
-            "fill_data": 50,
-            "io_tool": "smallfile",
-            "mount": "fuse",
-            "filesystem": fs_name,
-            "mount_dir": f"{fuse_mounting_dir_1}",
-        }
-        # fill up to 50% of the cluster
-        fs_io(client=clients[0], fs_config=cephfs, fs_util=fs_util)
-        with parallel() as p:
-            p.spawn(fs_util.run_ios(client1, fuse_mounting_dir_1, ["dd", "smallfile"]))
-        volume_name_list.append(f"cephfs1_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs1_{ran_string} cephfs2_{ran_string} --yes-i-really-mean-it",
-        )
-        # check if the volume is renamed
-        rename_result, rename_ec = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        if "cephfs2" not in rename_result:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful")
-        # Rename the volume with additional pools
-        pool_name = f"cephfs2_data_pool_{ran_string}"
-        client1.exec_command(sudo=True, cmd=f"ceph osd pool create {pool_name} 128 128")
-        client1.exec_command(
-            sudo=True, cmd=f"ceph fs add_data_pool cephfs2_{ran_string} {pool_name}"
-        )
-        volume_name_list.append(f"cephfs2_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs2_{ran_string} cephfs3_{ran_string} --yes-i-really-mean-it",
-        )
-        vol_list, _ = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        if "cephfs3" not in vol_list:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful with additional pools")
-        # Rename the volume with subvolumegroup and subvolume
-        group_name = f"cephfs3_subvolumegroup_{ran_string}"
-        subvol_name = f"cephfs3_subvolume_{ran_string}"
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs subvolumegroup create cephfs3_{ran_string} {group_name}",
-        )
-        time.sleep(5)
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs subvolume create cephfs3_{ran_string} {subvol_name} --group_name {group_name}",
-        )
-        volume_name_list.append(f"cephfs3_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs3_{ran_string} cephfs4_{ran_string} --yes-i-really-mean-it",
-        )
-        vol_list, _ = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        if "cephfs4" not in vol_list:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful with subvolumegroup and subvolume")
-        # Rename the volume when the volume is down -> ceph fs fail cephfs4
-        client1.exec_command(sudo=True, cmd=f"ceph fs fail cephfs4_{ran_string}")
-        volume_name_list.append(f"cephfs4_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs4_{ran_string} cephfs5_{ran_string} --yes-i-really-mean-it",
-        )
-        vol_list, _ = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        log.info(vol_list)
-        if "cephfs5" not in vol_list:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful when the volume is down")
-        # Rename the volume with and without refuse_client_session flags
-        log.info("Rename the volume with refuse_client_session flag")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs set cephfs5_{ran_string} refuse_client_session true",
-        )
-        volume_name_list.append(f"cephfs5_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs5_{ran_string} cephfs6_{ran_string} --yes-i-really-mean-it",
-        )
-        vol_list, _ = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        log.info(vol_list)
-        if "cephfs6" not in vol_list:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful with refuse_client_session flag")
-        log.info("Rename the volume without refuse_client_session flag")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs set cephfs6_{ran_string} refuse_client_session false",
-        )
-        volume_name_list.append(f"cephfs6_{ran_string}")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rename cephfs6_{ran_string} cephfs7_{ran_string} --yes-i-really-mean-it",
-        )
-        volume_name_list.append(f"cephfs7_{ran_string}")
-        vol_list, _ = client1.exec_command(sudo=True, cmd="ceph fs ls")
-        log.info(vol_list)
-        if "cephfs7" not in vol_list:
-            log.error("Volume rename failed")
-            return 1
-        else:
-            log.info("Volume rename successful without refuse_client_session flag")
-
-        return 0
-    except Exception as e:
-        log.error(e)
-        log.error(traceback.format_exc())
-        return 1
-    finally:
-        log.info("Cleaning up")
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph fs volume rm {volume_name_list[-1]} --yes-i-really-mean-it",
-        )
-        client1.exec_command(
-            sudo=True, cmd="ceph config set mon mon_allow_pool_delete false"
-        )
-        client1.exec_command(
-            sudo=True,
-            cmd=f"ceph osd pool delete {pool_name} {pool_name} --yes-i-really-really-mean-it",
-        )