diff --git a/data-manifest b/data-manifest
index 88aa8fee0..7210be6b5 100644
--- a/data-manifest
+++ b/data-manifest
@@ -82,7 +82,6 @@ test/features/crm_report_normal.feature
 test/features/environment.py
 test/features/geo_setup.feature
 test/features/healthcheck.feature
-test/features/ocfs2.feature
 test/features/qdevice_options.feature
 test/features/qdevice_setup_remove.feature
 test/features/qdevice_usercase.feature
@@ -197,7 +196,6 @@ test/unittests/test_gv.py
 test/unittests/test_handles.py
 test/unittests/test_lock.py
 test/unittests/test_objset.py
-test/unittests/test_ocfs2.py
 test/unittests/test_parallax.py
 test/unittests/test_parse.py
 test/unittests/test_prun.py
diff --git a/test/features/bootstrap_options.feature b/test/features/bootstrap_options.feature
index d1eb92906..8d81de6ec 100644
--- a/test/features/bootstrap_options.feature
+++ b/test/features/bootstrap_options.feature
@@ -42,7 +42,7 @@ Feature: crmsh bootstrap process - options
   @clean
   Scenario: Stage validation
     When    Try "crm cluster init fdsf -y" on "hanode1"
-    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, ocfs2, admin, qdevice)" in stderr
+    Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, corosync, sbd, cluster, admin, qdevice)" in stderr
     When    Try "crm cluster join fdsf -y" on "hanode1"
     Then    Expected "Invalid stage: fdsf(available stages: ssh, csync2, ssh_merge, cluster)" in stderr
     When    Try "crm cluster join ssh -y" on "hanode1"
diff --git a/test/features/ocfs2.feature b/test/features/ocfs2.feature
deleted file mode 100644
index 96a83af84..000000000
--- a/test/features/ocfs2.feature
+++ /dev/null
@@ -1,61 +0,0 @@
-@ocfs2
-Feature: OCFS2 configuration/verify using bootstrap
-
-@clean
-Scenario: Configure ocfs2 along with init process
-  Given   Has disk "/dev/sda1" on "hanode1"
-  And     Has disk "/dev/sda2" on "hanode1"
-  When    Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -y" on "hanode1"
-  Then    Cluster service is "started" on "hanode1"
-  And     Service "sbd" is "started" on "hanode1"
-  And     Resource "stonith-sbd" type "fence_sbd" is "Started"
-  And     Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
-  And     Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
-
-@clean
-Scenario: Configure cluster lvm2 + ocfs2 with init process
-  Given   Has disk "/dev/sda1" on "hanode1"
-  And     Has disk "/dev/sda2" on "hanode1"
-  And     Has disk "/dev/sda3" on "hanode1"
-  When    Run "crm cluster init -s /dev/sda1 -o /dev/sda2 -o /dev/sda3 -C -y" on "hanode1"
-  Then    Cluster service is "started" on "hanode1"
-  And     Service "sbd" is "started" on "hanode1"
-  And     Resource "stonith-sbd" type "fence_sbd" is "Started"
-  And     Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
-  And     Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
-  And     Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
-  And     Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
-
-@clean
-Scenario: Add ocfs2 alone on a running cluster
-  Given   Has disk "/dev/sda1" on "hanode1"
-  And     Has disk "/dev/sda2" on "hanode1"
-  And     Has disk "/dev/sda1" on "hanode2"
-  And     Has disk "/dev/sda2" on "hanode2"
-  When    Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
-  And     Run "crm cluster join -c hanode1 -y" on "hanode2"
-  Then    Online nodes are "hanode1 hanode2"
-  And     Service "sbd" is "started" on "hanode1"
-  And     Service "sbd" is "started" on "hanode2"
-  And     Resource "stonith-sbd" type "fence_sbd" is "Started"
-  When    Run "crm cluster init ocfs2 -o /dev/sda2 -y" on "hanode1"
-  Then    Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
-  And     Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
-
-@clean
-Scenario: Add cluster lvm2 + ocfs2 on a running cluster
-  Given   Has disk "/dev/sda1" on "hanode1"
-  And     Has disk "/dev/sda2" on "hanode1"
-  And     Has disk "/dev/sda1" on "hanode2"
-  And     Has disk "/dev/sda2" on "hanode2"
-  When    Run "crm cluster init -s /dev/sda1 -y" on "hanode1"
-  And     Run "crm cluster join -c hanode1 -y" on "hanode2"
-  Then    Online nodes are "hanode1 hanode2"
-  And     Service "sbd" is "started" on "hanode1"
-  And     Service "sbd" is "started" on "hanode2"
-  And     Resource "stonith-sbd" type "fence_sbd" is "Started"
-  When    Run "crm cluster init ocfs2 -o /dev/sda2 -C -y" on "hanode1"
-  Then    Resource "ocfs2-dlm" type "pacemaker:controld" is "Started"
-  And     Resource "ocfs2-lvmlockd" type "heartbeat:lvmlockd" is "Started"
-  And     Resource "ocfs2-lvmactivate" type "heartbeat:LVM-activate" is "Started"
-  And     Resource "ocfs2-clusterfs" type "heartbeat:Filesystem" is "Started"
diff --git a/test/features/steps/const.py b/test/features/steps/const.py
index 8962aa6da..08bac0c79 100644
--- a/test/features/steps/const.py
+++ b/test/features/steps/const.py
@@ -134,18 +134,6 @@
                         Block device to use for SBD fencing, use ";" as
                         separator or -s multiple times for multi path (up to 3
                         devices)
-  -o DEVICE, --ocfs2-device DEVICE
-                        Block device to use for OCFS2; When using Cluster LVM2
-                        to manage the shared storage, user can specify one or
-                        multiple raw disks, use ";" as separator or -o
-                        multiple times for multi path (must specify -C option)
-                        NOTE: this is a Technical Preview
-  -C, --cluster-lvm2    Use Cluster LVM2 (only valid together with -o option)
-                        NOTE: this is a Technical Preview
-  -m MOUNT, --mount-point MOUNT
-                        Mount point for OCFS2 device (default is
-                        /srv/clusterfs, only valid together with -o option)
-                        NOTE: this is a Technical Preview
 
 Stage can be one of:
     ssh         Create SSH keys for passwordless SSH between cluster nodes
@@ -153,7 +141,6 @@
     corosync    Configure corosync
     sbd         Configure SBD (requires -s )
     cluster     Bring the cluster online
-    ocfs2       Configure OCFS2 (requires -o )  NOTE: this is a Technical Preview
     admin       Create administration virtual IP (optional)
     qdevice     Configure qdevice and qnetd
 
@@ -181,12 +168,6 @@
   # Setup the cluster on the current node, with QDevice
   crm cluster init --qnetd-hostname -y
 
-  # Setup the cluster on the current node, with SBD+OCFS2
-  crm cluster init -s -o -y
-
-  # Setup the cluster on the current node, with SBD+OCFS2+Cluster LVM
-  crm cluster init -s -o -o -C -y
-
   # Add SBD on a running cluster
   crm cluster init sbd -s -y
 
@@ -197,10 +178,7 @@
   crm cluster init sbd -S -y
 
   # Add QDevice on a running cluster
-  crm cluster init qdevice --qnetd-hostname -y
-
-  # Add OCFS2+Cluster LVM on a running cluster
-  crm cluster init ocfs2 -o -o -C -y'''
+  crm cluster init qdevice --qnetd-hostname -y'''
 
 
 CRM_CLUSTER_JOIN_H_OUTPUT = '''Join existing cluster
diff --git a/test/run-functional-tests b/test/run-functional-tests
index 37afc158a..b083298df 100755
--- a/test/run-functional-tests
+++ b/test/run-functional-tests
@@ -14,7 +14,7 @@ HA_NETWORK_ARRAY[1]=$HA_NETWORK_SECOND
 HA_NETWORK_V6_ARRAY[0]="2001:db8:10::/64"
 HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
 BEHAVE_CASE_DIR="$(dirname $0)/features/"
-BEHAVE_CASE_EXCLUDE="sbd|ocfs2"
+BEHAVE_CASE_EXCLUDE="sbd"
 read -r -d '' SSHD_CONFIG_AZURE << EOM
 PermitRootLogin no