diff --git a/ceph/rados/rados_scrub.py b/ceph/rados/rados_scrub.py
index 18d30634836..f2633c8b88d 100644
--- a/ceph/rados/rados_scrub.py
+++ b/ceph/rados/rados_scrub.py
@@ -1,23 +1,23 @@
 """
- This module contains the methods required for scrubbing.
+This module contains the methods required for scrubbing.
- 1.To set the parameters for scrubbing initially required the
- cluster time and day details.get_cluster_date method provides
- the details
+1. Setting the scrubbing parameters initially requires the
+ cluster time and day details; the get_cluster_date method provides
+ these details.
- 2.set_osd_configuration method used to set the configuration
- parameters on the cluster.
+2. The set_osd_configuration method is used to set the configuration
+ parameters on the cluster.
- 3.get_osd_configuration method is used to get the configured parameters
- on the cluster.
+3. The get_osd_configuration method is used to get the configured parameters
+ on the cluster.
- NOTE: With set_osd_configuration & get_osd_configuration methods can
- use to set the get the any OSD configuration parameters.
+ NOTE: The set_osd_configuration & get_osd_configuration methods can
+ be used to set and get any OSD configuration parameter.
- 4. get_pg_dump method is used to get the pg dump details from the cluster
+4. The get_pg_dump method is used to get the pg dump details from the cluster
- 5. verify_scrub method used for the verification of scheduled scrub
- happened or not.
+5. The verify_scrub method is used to verify whether the scheduled scrub
+ happened or not.
 """
 import datetime
diff --git a/ceph/rados/utils.py b/ceph/rados/utils.py
index 03cf55b73a0..631dde9e811 100644
--- a/ceph/rados/utils.py
+++ b/ceph/rados/utils.py
@@ -1,9 +1,9 @@
 """
- This module contains the wrapper functions to perform general ceph cluster modification operations.
- 1. Remove OSD
- 2. Add OSD
- 3. Set osd out
- 3. Zap device path
+This module contains the wrapper functions to perform general ceph cluster modification operations.
+ 1. Remove OSD
+ 2. Add OSD
+ 3. Set osd out
+ 4. Zap device path
 """
 from json import loads
diff --git a/tests/ceph_ansible/purge_cluster.py b/tests/ceph_ansible/purge_cluster.py
index a337ba11af3..c915071b7bf 100644
--- a/tests/ceph_ansible/purge_cluster.py
+++ b/tests/ceph_ansible/purge_cluster.py
@@ -1,4 +1,4 @@
-""" Purges the Ceph the cluster"""
+"""Purges the Ceph cluster"""
 import datetime
 import re
diff --git a/tests/ceph_ansible/purge_dashboard.py b/tests/ceph_ansible/purge_dashboard.py
index 4c8ad716688..98fa180e4f8 100644
--- a/tests/ceph_ansible/purge_dashboard.py
+++ b/tests/ceph_ansible/purge_dashboard.py
@@ -1,4 +1,4 @@
-""" Module to purge ceph dashboard."""
+"""Module to purge ceph dashboard."""
 import json
diff --git a/tests/misc_env/sosreport.py b/tests/misc_env/sosreport.py
index 99a1864b06b..94fe93dfc3a 100644
--- a/tests/misc_env/sosreport.py
+++ b/tests/misc_env/sosreport.py
@@ -1,4 +1,4 @@
-""" Collecting logs using sosreport from all nodes in cluster Except Client Node."""
+"""Collecting logs using sosreport from all nodes in the cluster except the client node."""
 import re
diff --git a/tests/rados/scheduled_scrub_scenarios.py b/tests/rados/scheduled_scrub_scenarios.py
index cc993daa2fb..130717ea740 100644
--- a/tests/rados/scheduled_scrub_scenarios.py
+++ b/tests/rados/scheduled_scrub_scenarios.py
@@ -1,6 +1,6 @@
 """
- This module contains the methods required to check the scheduled scrubbing scenarios.
- Based on the test cases setting the scrub parameters and verifying the functionality.
+This module contains the methods required to check the scheduled scrubbing scenarios.
+Based on the test case, the scrub parameters are set and the functionality is verified.
 """
 import os
diff --git a/tests/rados/test_bluestore_configs.py b/tests/rados/test_bluestore_configs.py
index ef75564592f..28665d7ba34 100644
--- a/tests/rados/test_bluestore_configs.py
+++ b/tests/rados/test_bluestore_configs.py
@@ -1,4 +1,4 @@
-""" Module to verify scenarios related to BlueStore config changes"""
+"""Module to verify scenarios related to BlueStore config changes"""
 import time
diff --git a/tests/rados/test_replica1.py b/tests/rados/test_replica1.py
index f3ceb9b3b3e..0571e2c9eca 100644
--- a/tests/rados/test_replica1.py
+++ b/tests/rados/test_replica1.py
@@ -1,4 +1,4 @@
-""" Test module to verify replica-1 non-resilient pool functionalities"""
+"""Test module to verify replica-1 non-resilient pool functionalities"""
 import time
 from copy import deepcopy
diff --git a/tests/rbd/test_performance_immutable_cache.py b/tests/rbd/test_performance_immutable_cache.py
index dfdd712a26a..e95be4e4e90 100644
--- a/tests/rbd/test_performance_immutable_cache.py
+++ b/tests/rbd/test_performance_immutable_cache.py
@@ -1,26 +1,26 @@
 """Test case covered -
- CEPH-83581376 - verify the performance with immutable cache
- and without immutable cache for IO operations
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test Case Flow:
- 1. Create RBD based pool and an Image
- 2. Enable the immutable cache client settings
- 3. Install the ceph-immutable-object-cache package
- 4. Create a unique Ceph user ID, the keyring
- 5. Enable the ceph-immutable-object-cache daemon with created client
- 6. Write some data to the image using FIO
- 7. Perform snapshot,protect and clone of rbd images
- 8. Read the data from cloned images first time with map, mount, unmount
- 9. Read the data from cloned images second time with map, mount, unmount
- 10. note down the time differnce of first read and second read make sure
- second read should be less time compare to first read in cache
- 11. Repeat the above operations without immutable cache
- 12.check the performance make sure cache gives good performance
+CEPH-83581376 - Verify the performance with immutable cache
+and without immutable cache for IO operations
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files
+
+Test Case Flow:
+1. Create RBD based pool and an Image
+2. Enable the immutable cache client settings
+3. Install the ceph-immutable-object-cache package
+4. Create a unique Ceph user ID, the keyring
+5. Enable the ceph-immutable-object-cache daemon with the created client
+6. Write some data to the image using FIO
+7. Perform snapshot, protect and clone of rbd images
+8. Read the data from cloned images the first time with map, mount, unmount
+9. Read the data from cloned images the second time with map, mount, unmount
+10. Note down the time difference between the first read and the second read and make sure
+the second read takes less time than the first read when the cache is in use
+11. Repeat the above operations without the immutable cache
+12. Check the performance and make sure the cache gives better performance
 """
 from test_rbd_immutable_cache import configure_immutable_cache
diff --git a/tests/rbd/test_rbd_compression.py b/tests/rbd/test_rbd_compression.py
index b43e3fa8d91..167937cbb65 100644
--- a/tests/rbd/test_rbd_compression.py
+++ b/tests/rbd/test_rbd_compression.py
@@ -1,19 +1,19 @@
 """Test case covered -
- CEPH-83574644 - Validate "rbd_compression_hint" config
- settings on globally, Pool level, and image level.
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test Case Flow:
- 1. Create a pool and an Image, write some data on it
- 2. set bluestore_compression_mode to passive to enable rbd_compression_hint feature
- 3. Set compression_algorithm, compression_mode and compression_ratio for the pool
- 4. verify "rbd_compression_hint" to "compressible" on global, pool and image level
- 5. verify "rbd_compression_hint" to "incompressible" on global, pool and image level
- 6. Repeat the above steps for ecpool
+CEPH-83574644 - Validate "rbd_compression_hint" config
+settings at the global, pool, and image level.
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files
+
+Test Case Flow:
+1. Create a pool and an Image, write some data on it
+2. Set bluestore_compression_mode to passive to enable the rbd_compression_hint feature
+3. Set compression_algorithm, compression_mode and compression_ratio for the pool
+4. Verify "rbd_compression_hint" set to "compressible" at global, pool and image level
+5. Verify "rbd_compression_hint" set to "incompressible" at global, pool and image level
+6. Repeat the above steps for ecpool
 """
 import json
diff --git a/tests/rbd/test_rbd_dm_cache.py b/tests/rbd/test_rbd_dm_cache.py
index d15e85a0c37..05280f3cf00 100644
--- a/tests/rbd/test_rbd_dm_cache.py
+++ b/tests/rbd/test_rbd_dm_cache.py
@@ -1,23 +1,23 @@
 """Test case covered -
- CEPH-83575581 - Verify the usage of ceph RBD images as
- dm-cache and dm-write cache from LVM side to enhance cache mechanism.
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test Case Flow:
- 1. Create RBD based pool and an Image
- 2. get rbd based image disk using rbd map
- 3. Create physical volume for RBD based disk
- 4. Create volume group for RBD based disk
- 5. Create cache disk, meta disk and data disk for the volume group
- 6. make disk as dm-cache and dm-write cache based type of cache specified
- 7. Create Xfs file system on metadata parted disk for file system purpose
- 8. mount some files on cache disk and write some I/O on it.
- 9. create snapshot of cache disk image
- 10. check ceph health status
+CEPH-83575581 - Verify the usage of ceph RBD images as
+dm-cache and dm-writecache from the LVM side to enhance the cache mechanism.
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files
+
+Test Case Flow:
+1. Create RBD based pool and an Image
+2. Get the rbd based image disk using rbd map
+3. Create a physical volume for the RBD based disk
+4. Create a volume group for the RBD based disk
+5. Create cache disk, meta disk and data disk for the volume group
+6. Make the disk a dm-cache or dm-writecache device based on the type of cache specified
+7. Create an XFS file system on the partitioned metadata disk for file system use
+8. Mount the cache disk and write some I/O to files on it.
+9. Create a snapshot of the cache disk image
+10. Check ceph health status
 """
 from tests.rbd.rbd_utils import initial_rbd_config
diff --git a/tests/rbd/test_rbd_immutable_cache.py b/tests/rbd/test_rbd_immutable_cache.py
index 3e895390788..839d2840001 100644
--- a/tests/rbd/test_rbd_immutable_cache.py
+++ b/tests/rbd/test_rbd_immutable_cache.py
@@ -1,21 +1,21 @@
 """Test case covered -
- CEPH-83574134 - Configure immutable object cache daemon
- and validate client RBD objects
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test Case Flow:
- 1. Create RBD based pool and an Image
- 2. Enable the immutable cache client settings
- 3. Install the ceph-immutable-object-cache package
- 4. Create a unique Ceph user ID, the keyring
- 5. Enable the ceph-immutable-object-cache daemon with created client
- 6. Write some data to the image using FIO
- 7. Perform snapshot,protect and clone of rbd images
- 8. Read the cloned images from the cache path
+CEPH-83574134 - Configure immutable object cache daemon
+and validate client RBD objects
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files
+
+Test Case Flow:
+1. Create RBD based pool and an Image
+2. Enable the immutable cache client settings
+3. Install the ceph-immutable-object-cache package
+4. Create a unique Ceph user ID, the keyring
+5. Enable the ceph-immutable-object-cache daemon with the created client
+6. Write some data to the image using FIO
+7. Perform snapshot, protect and clone of rbd images
+8. Read the cloned images from the cache path
 """
 import time
diff --git a/tests/rbd/test_rbd_immutable_cache_cluster_operations.py b/tests/rbd/test_rbd_immutable_cache_cluster_operations.py
index e7218cd402b..59de9f0cbe3 100644
--- a/tests/rbd/test_rbd_immutable_cache_cluster_operations.py
+++ b/tests/rbd/test_rbd_immutable_cache_cluster_operations.py
@@ -1,23 +1,23 @@
 """Perform cluster-related operations along with
- immutable object cache test parallel.
+the immutable object cache test in parallel.
- Pre-requisites :
- We need atleast one client node with ceph-common and fio packages,
- conf and keyring files
+Pre-requisites :
+We need at least one client node with ceph-common and fio packages,
+conf and keyring files
- Test cases covered -
- CEPH-83574132 - Immutable object cache with cluster operations.
+Test cases covered -
+CEPH-83574132 - Immutable object cache with cluster operations.
- Test Case Flow -
+Test Case Flow -
- 1)Create multiple rbd pool and multiple images using rbd commands
- 2)Write some data to the images using FIO
- 3)Create multiple clones images in different pools from the multiple parent image created
- 4)Read the cloned images of different pools in parallel and check the cache status
- 5) Restart the Mon, OSD, and cluster target and parallelly run the IO and cache status
- 6) Remove mon and add mon back to cluster with parallelly run the IO and cache status
- 8) check ceph health status
- 9) Perform test on both Replicated and EC pool
+1) Create multiple rbd pools and multiple images using rbd commands
+2) Write some data to the images using FIO
+3) Create multiple cloned images in different pools from the multiple parent images created
+4) Read the cloned images of different pools in parallel and check the cache status
+5) Restart the Mon, OSD, and cluster target while running IO and checking the cache status in parallel
+6) Remove a mon and add it back to the cluster while running IO and checking the cache status in parallel
+7) Check ceph health status
+8) Perform the test on both Replicated and EC pools
 """
 import time
diff --git a/tests/rbd_mirror/rbd_mirror_reconfigure_primary_cluster.py b/tests/rbd_mirror/rbd_mirror_reconfigure_primary_cluster.py
index 0816a2d3c17..ec6253f5c7b 100644
--- a/tests/rbd_mirror/rbd_mirror_reconfigure_primary_cluster.py
+++ b/tests/rbd_mirror/rbd_mirror_reconfigure_primary_cluster.py
@@ -1,22 +1,22 @@
 """Test case covered -
- CEPH-9476- Primary cluster permanent failure,
- Recreate the primary cluster and Re-establish mirror with newly created cluster as Primary.
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- (At least with 64 pgs)
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test case flows:
- 1) After site-a failure, promote site-b cluster as primary
- 2) Create a pool with same name as secondary pool has
- 3) Perform the mirroring bootstrap for cluster peers
- 4) copy and import bootstrap token to peer cluster
- 5) verify peer cluster got added successfully after failback
- 6) verify all the images from secondary mirrored to primary
- 7) Demote initially promoted secondary as primary
- 8) Promote newly created mirrored cluster as primary
+CEPH-9476 - Primary cluster permanent failure,
+Recreate the primary cluster and re-establish the mirror with the newly created cluster as primary.
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+(At least with 64 pgs)
+2. We need at least one client node with ceph-common package,
+conf and keyring files
+
+Test case flows:
+1) After site-a failure, promote the site-b cluster as primary
+2) Create a pool with the same name as the secondary pool
+3) Perform the mirroring bootstrap for cluster peers
+4) Copy and import the bootstrap token to the peer cluster
+5) Verify the peer cluster got added successfully after failback
+6) Verify all the images from the secondary are mirrored to the primary
+7) Demote the initially promoted secondary cluster from primary
+8) Promote the newly created mirrored cluster as primary
 """
 # import datetime
diff --git a/tests/rbd_mirror/rbd_mirror_reconfigure_secondary_cluster.py b/tests/rbd_mirror/rbd_mirror_reconfigure_secondary_cluster.py
index 4bf182b5621..2a0bb4414f2 100644
--- a/tests/rbd_mirror/rbd_mirror_reconfigure_secondary_cluster.py
+++ b/tests/rbd_mirror/rbd_mirror_reconfigure_secondary_cluster.py
@@ -1,20 +1,20 @@
 """Test case covered -
- CEPH-9477- Secondary cluster permanent failure,
- Recreate the secondary cluster and Re-establish mirror with newly created cluster as secondary.
-
- Pre-requisites :
- 1. Cluster must be up and running with capacity to create pool
- (At least with 64 pgs)
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files
-
- Test case flows:
- 1) After site-b failure, bring up new cluster for secondary
- 2) Create a pool with same name as secondary pool
- 3) Perform the mirroring bootstrap for cluster peers
- 4) copy and import bootstrap token to peer cluster
- 5) verify peer cluster got added successfully after failover
- 6) verify all the images from primary mirrored to secondary
+CEPH-9477 - Secondary cluster permanent failure,
+Recreate the secondary cluster and re-establish the mirror with the newly created cluster as secondary.
+
+Pre-requisites :
+1. Cluster must be up and running with capacity to create pool
+(At least with 64 pgs)
+2. We need at least one client node with ceph-common package,
+conf and keyring files
+
+Test case flows:
+1) After site-b failure, bring up a new cluster for the secondary
+2) Create a pool with the same name as the secondary pool
+3) Perform the mirroring bootstrap for cluster peers
+4) Copy and import the bootstrap token to the peer cluster
+5) Verify the peer cluster got added successfully after failover
+6) Verify all the images from the primary are mirrored to the secondary
 """
 import ast
diff --git a/tests/rbd_mirror/test_rbd_mirror_cloned_image.py b/tests/rbd_mirror/test_rbd_mirror_cloned_image.py
index 8286845b67e..14a21a79174 100644
--- a/tests/rbd_mirror/test_rbd_mirror_cloned_image.py
+++ b/tests/rbd_mirror/test_rbd_mirror_cloned_image.py
@@ -1,14 +1,14 @@
 """Test case covered - CEPH-83576099
- Test Case Flow:
- 1. Configure snapshot based mirroring between two clusters
- 2. create snapshots of an image
- 3. protect the snapshot of an image
- 4. clone the snapshot to new image
- 5. Tried to enable cloned images for snapshot-based mirroring it should not allow
- 6. Flatten the cloned image
- 7. Enable snapshot based mirroring for the flattened child image
- 8. Perform test steps for both Replicated and EC pool
+Test Case Flow:
+1. Configure snapshot based mirroring between two clusters
+2. Create snapshots of an image
+3. Protect the snapshot of an image
+4. Clone the snapshot to a new image
+5. Try to enable snapshot-based mirroring on the cloned image; it should not be allowed
+6. Flatten the cloned image
+7. Enable snapshot based mirroring for the flattened child image
+8. Perform test steps for both Replicated and EC pools
 """
 from tests.rbd.rbd_utils import Rbd
diff --git a/tests/rbd_mirror/test_rbd_mirror_connection_metrics.py b/tests/rbd_mirror/test_rbd_mirror_connection_metrics.py
index 287dbee9982..4075118f80b 100644
--- a/tests/rbd_mirror/test_rbd_mirror_connection_metrics.py
+++ b/tests/rbd_mirror/test_rbd_mirror_connection_metrics.py
@@ -1,20 +1,20 @@
 """Test case covered -
- CEPH-83575566 - Performance counter for rbd mirror network
- connection health metrics.
-
- Pre-requisites :
- 1. Two Clusters must be up and running to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files on each node.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Create an Image on primary mirror cluster in same pool.
- 3. Configure mirroring (peer bootstrap) between two clusters.
- 4. Enable image mode snapshot based mirroring on the pool respectively.
- 5. Start running IOs on the primary image.
- 6. Verify ceph exporter from prometheus for network connection
- health metrics.
+CEPH-83575566 - Performance counter for rbd mirror network
+connection health metrics.
+
+Pre-requisites :
+1. Two Clusters must be up and running to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files on each node.
+
+Test Case Flow:
+ 1. Create a pool on both clusters.
+ 2. Create an Image on the primary mirror cluster in the same pool.
+ 3. Configure mirroring (peer bootstrap) between two clusters.
+ 4. Enable image mode snapshot based mirroring on the pool respectively.
+ 5. Start running IOs on the primary image.
+ 6. Verify the ceph exporter from Prometheus for network connection
+ health metrics.
 """
 from tests.rbd_mirror import rbd_mirror_utils as rbdmirror
diff --git a/tests/rbd_mirror/test_rbd_mirror_image_features.py b/tests/rbd_mirror/test_rbd_mirror_image_features.py
index 84b12534827..2fa899ef013 100644
--- a/tests/rbd_mirror/test_rbd_mirror_image_features.py
+++ b/tests/rbd_mirror/test_rbd_mirror_image_features.py
@@ -1,16 +1,16 @@
 """Test case covered -
- CEPH-9520 - When a both Pool and image based mirroring is established,
- verify if change of any image features in primary site should reflect
- on remote site upon modification.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Configure mirroring (peer bootstrap) between two clusters.
- 3. Enable pool mode journal based mirroring on the pool respectively.
- 4. Verify if change of any image features in primary site gets mirrored in secondary site.
- 5. Enable image mode snapshot based mirroring on the pool respectively.
- 6. Verify if change of any image features in primary site gets mirrored in secondary site.
- 7. perform test on both replicated and EC pool
+CEPH-9520 - When both pool and image based mirroring is established,
+verify that a change of any image feature on the primary site is reflected
+on the remote site upon modification.
+
+Test Case Flow:
+1. Create a pool on both clusters.
+2. Configure mirroring (peer bootstrap) between two clusters.
+3. Enable pool mode journal based mirroring on the pool respectively.
+4. Verify that a change of any image feature on the primary site gets mirrored to the secondary site.
+5. Enable image mode snapshot based mirroring on the pool respectively.
+6. Verify that a change of any image feature on the primary site gets mirrored to the secondary site.
+7. Perform the test on both replicated and EC pools
 """
 import datetime
diff --git a/tests/rbd_mirror/test_rbd_mirror_journal_metrics.py b/tests/rbd_mirror/test_rbd_mirror_journal_metrics.py
index b111b1a08c1..613d924943d 100644
--- a/tests/rbd_mirror/test_rbd_mirror_journal_metrics.py
+++ b/tests/rbd_mirror/test_rbd_mirror_journal_metrics.py
@@ -1,19 +1,19 @@
 """Test case covered -
- CEPH-83575564 - Performance counter metrics for journal based
- mirroring.
-
- Pre-requisites :
- 1. Two Clusters must be up and running to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files on each node.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Create an Image on primary mirror cluster in same pool.
- 3. Configure mirroring (peer bootstrap) between two clusters.
- 4. Enable pool journal based mirroring on the pool respectively.
- 5. Start running IOs on the primary image.
- 6. Verify journal mirror based performance counter metrics.
+CEPH-83575564 - Performance counter metrics for journal based
+mirroring.
+
+Pre-requisites :
+1. Two Clusters must be up and running to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files on each node.
+
+Test Case Flow:
+ 1. Create a pool on both clusters.
+ 2. Create an Image on the primary mirror cluster in the same pool.
+ 3. Configure mirroring (peer bootstrap) between two clusters.
+ 4. Enable pool journal based mirroring on the pool respectively.
+ 5. Start running IOs on the primary image.
+ 6. Verify journal mirror based performance counter metrics.
 """
 from ceph.rbd.workflows.rbd_mirror_metrics import create_symlink_and_get_metrics
diff --git a/tests/rbd_mirror/test_rbd_mirror_peer_id.py b/tests/rbd_mirror/test_rbd_mirror_peer_id.py
index 81751bb8e50..90c1e328530 100644
--- a/tests/rbd_mirror/test_rbd_mirror_peer_id.py
+++ b/tests/rbd_mirror/test_rbd_mirror_peer_id.py
@@ -1,21 +1,21 @@
 """Test case covered - CEPH-83590607
- Test Case Flow:
- 1. Set up bidirectional mirroring on a test pool as usual
- 2. Verify that "rbd mirror pool status" reports "health: OK" on both clusters
- 3. Grab service_id and instance_id from "rbd mirror pool status --verbose" output on cluster B
- 4. Grab peer UUID from "rbd mirror pool info" output on cluster B
- 5. Set wrong client id to mirror peer client using
- "rbd mirror pool peer set client client.invalid" command on cluster B
- 6. Wait 30-90 seconds and verify that "rbd mirror pool status" reports "health: ERROR" on cluster B
- and "health: WARNING" on cluster A
- 7. Reset correct client id to mirror peer client using
- "rbd mirror pool peer set client client.rbd-mirror-peer" command on cluster B
- 8. Wait 30-90 seconds and verify that "rbd mirror pool status" reports "health: OK" on both clusters again
- 9. Grab service_id and instance_id from "rbd mirror pool status --verbose" output on cluster B again
- 10. Verify that service_id from step 3 is equal to the one from step 9
- 11. Verify that instance_id from step 3 is not same than the one from step 9
- 8. Perform test steps for both Replicated and EC pool
+Test Case Flow:
+1. Set up bidirectional mirroring on a test pool as usual
+2. Verify that "rbd mirror pool status" reports "health: OK" on both clusters
+3. Grab service_id and instance_id from "rbd mirror pool status --verbose" output on cluster B
+4. Grab peer UUID from "rbd mirror pool info" output on cluster B
+5. Set a wrong client id on the mirror peer client using the
+ "rbd mirror pool peer set client client.invalid" command on cluster B
+6. Wait 30-90 seconds and verify that "rbd mirror pool status" reports "health: ERROR" on cluster B
+ and "health: WARNING" on cluster A
+7. Reset the correct client id on the mirror peer client using the
+ "rbd mirror pool peer set client client.rbd-mirror-peer" command on cluster B
+8. Wait 30-90 seconds and verify that "rbd mirror pool status" reports "health: OK" on both clusters again
+9. Grab service_id and instance_id from "rbd mirror pool status --verbose" output on cluster B again
+10. Verify that the service_id from step 3 is equal to the one from step 9
+11. Verify that the instance_id from step 3 is not the same as the one from step 9
+12. Perform test steps for both Replicated and EC pools
 """
 import json
diff --git a/tests/rbd_mirror/test_rbd_mirror_reconfig.py b/tests/rbd_mirror/test_rbd_mirror_reconfig.py
index 5097a447c0d..69168acc462 100644
--- a/tests/rbd_mirror/test_rbd_mirror_reconfig.py
+++ b/tests/rbd_mirror/test_rbd_mirror_reconfig.py
@@ -1,11 +1,11 @@
 """Test case covered -
- CEPH-9511 - Attempt creating mirror in same cluster. This should be prevented
+CEPH-9511 - Attempt creating a mirror in the same cluster. This should be prevented
- Test Case Flow:
- 1. Follow the latest official Block device Doc to configure RBD Mirroring
- 2. Attempt creating mirror in same cluster as primary and secondary this should be prevented
- 3. CLI should not provide any option to specify same cluster source and target
- or it detects the condition where source and target cluster for the mirror same and prints an error message.
+Test Case Flow:
+1. Follow the latest official Block device Doc to configure RBD Mirroring
+2. Attempt creating a mirror with the same cluster as primary and secondary; this should be prevented
+3. The CLI should not provide any option to specify the same cluster as source and target,
+or it should detect the condition where the source and target cluster for the mirror are the same and print an error message.
 """
 from ceph.parallel import parallel
diff --git a/tests/rbd_mirror/test_rbd_mirror_replica_count.py b/tests/rbd_mirror/test_rbd_mirror_replica_count.py
index 2dd741cbf0e..e0835b461cb 100644
--- a/tests/rbd_mirror/test_rbd_mirror_replica_count.py
+++ b/tests/rbd_mirror/test_rbd_mirror_replica_count.py
@@ -1,25 +1,25 @@
 """Test case covered -
- CEPH-9518 - Increase/Reduce replica count of the Primary Pool
- while the mirror job is in progress
-
- Pre-requisites :
- 1. Two Clusters must be up and running to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files on each node.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Create an Image on primary mirror cluster in same pool.
- 3. Configure mirroring (peer bootstrap) between two clusters.
- 4. Enable pool mode journal based mirroring on the pool respectively.
- 5. Start running IOs on the primary image.
- 6. While IOs is running decrease the replica count of mirror pool,
- (if rep count is 3 decrease to 2).
- 7. IO's should not stop and cluster health status should be healthy
- 8. While IOs is running increase the replica count of mirror pool,
- (if rep count is 2 increase to 3).
- 9. IO's should not stop and cluster health status should be healthy
- 10. check data consistency among mirror clusters.
+CEPH-9518 - Increase/Reduce the replica count of the Primary Pool
+while the mirror job is in progress
+
+Pre-requisites :
+1. Two Clusters must be up and running to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files on each node.
+
+Test Case Flow:
+ 1. Create a pool on both clusters.
+ 2. Create an Image on the primary mirror cluster in the same pool.
+ 3. Configure mirroring (peer bootstrap) between two clusters.
+ 4. Enable pool mode journal based mirroring on the pool respectively.
+ 5. Start running IOs on the primary image.
+ 6. While IOs are running, decrease the replica count of the mirror pool
+ (if the rep count is 3, decrease it to 2).
+ 7. IOs should not stop and the cluster health status should be healthy
+ 8. While IOs are running, increase the replica count of the mirror pool
+ (if the rep count is 2, increase it to 3).
+ 9. IOs should not stop and the cluster health status should be healthy
+ 10. Check data consistency among the mirror clusters.
 """
 from ceph.parallel import parallel
diff --git a/tests/rbd_mirror/test_rbd_mirror_secondary_full.py b/tests/rbd_mirror/test_rbd_mirror_secondary_full.py
index 65c5ff0a0ce..f7c6bea0d90 100644
--- a/tests/rbd_mirror/test_rbd_mirror_secondary_full.py
+++ b/tests/rbd_mirror/test_rbd_mirror_secondary_full.py
@@ -1,20 +1,20 @@
 """Test case covered -
- CEPH-9507 - Create a mirror image, when secondary cluster
- doesn't have enough space left to mirror the image copy
-
- Pre-requisites :
- 1. Two Clusters must be up and running to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files on each node.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Create an Image on primary mirror cluster in same pool.
- 3. Configure mirroring (peer bootstrap) between two clusters.
- 4. Enable image mode snapshot based mirroring on the pool respectively.
- 5. Start running IOs on the primary image.
- 6. keep on running IOs till secondary cluster becomes full
- 7. verify after secondary becomes full, mirroring should fail
+CEPH-9507 - Create a mirror image when the secondary cluster
+doesn't have enough space left to mirror the image copy
+
+Pre-requisites :
+1. Two Clusters must be up and running to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files on each node.
+
+Test Case Flow:
+ 1. Create a pool on both clusters.
+ 2. Create an Image on the primary mirror cluster in the same pool.
+ 3. Configure mirroring (peer bootstrap) between two clusters.
+ 4. Enable image mode snapshot based mirroring on the pool respectively.
+ 5. Start running IOs on the primary image.
+ 6. Keep on running IOs till the secondary cluster becomes full
+ 7. Verify that after the secondary becomes full, mirroring fails
 """
 from ceph.parallel import parallel
diff --git a/tests/rbd_mirror/test_rbd_mirror_snapshot.py b/tests/rbd_mirror/test_rbd_mirror_snapshot.py
index 35420e3d9d5..97754c63137 100644
--- a/tests/rbd_mirror/test_rbd_mirror_snapshot.py
+++ b/tests/rbd_mirror/test_rbd_mirror_snapshot.py
@@ -1,15 +1,15 @@
 """Test case covered -
- CEPH-83575375 and CEPH-83575376
-
- Test Case Flow:
- 1. Configure snapshot based mirroring between two clusters
- 2. Add mirror snapshot schedule at cluster and pool level
- 3. create some images and wait for them to mirror to secondary
- 4. Check that mirror snapshots are created for each images
- 5. Create a snapshot mirror schedule and list the snapshots
- 6. View the status of schedule snapshot
- 7. remove the snapshot schedule from cluster and pool level, verify the same
- 8. Perform test steps for both Replicated and EC pool
+CEPH-83575375 and CEPH-83575376
+
+Test Case Flow:
+1. Configure snapshot based mirroring between two clusters
+2. Add a mirror snapshot schedule at cluster and pool level
+3. Create some images and wait for them to mirror to the secondary
+4. Check that mirror snapshots are created for each image
+5. Create a snapshot mirror schedule and list the snapshots
+6. View the status of the scheduled snapshot
+7. Remove the snapshot schedule at cluster and pool level, and verify the same
+8. Perform test steps for both Replicated and EC pools
 """
 from tests.rbd_mirror.rbd_mirror_utils import rbd_mirror_config
diff --git a/tests/rbd_mirror/test_rbd_mirror_snapshot_metrics.py b/tests/rbd_mirror/test_rbd_mirror_snapshot_metrics.py
index b456866b189..15a677c6a4c 100644
--- a/tests/rbd_mirror/test_rbd_mirror_snapshot_metrics.py
+++ b/tests/rbd_mirror/test_rbd_mirror_snapshot_metrics.py
@@ -1,19 +1,19 @@
 """Test case covered -
- CEPH-83575565 - Performance counter metrics for snapshot based
- mirroring.
-
- Pre-requisites :
- 1. Two Clusters must be up and running to create pool
- 2. We need atleast one client node with ceph-common package,
- conf and keyring files on each node.
-
- Test Case Flow:
- 1. Create a pool on both clusters.
- 2. Create an Image on primary mirror cluster in same pool.
- 3. Configure mirroring (peer bootstrap) between two clusters.
- 4. Enable image mode snapshot based mirroring on the pool respectively.
- 5. Start running IOs on the primary image.
- 6. Verify snapshot mirror based performance conter metrics.
+CEPH-83575565 - Performance counter metrics for snapshot based
+mirroring.
+
+Pre-requisites :
+1. Two Clusters must be up and running to create pool
+2. We need at least one client node with ceph-common package,
+ conf and keyring files on each node.
+
+Test Case Flow:
+ 1. Create a pool on both clusters.
+ 2. Create an Image on the primary mirror cluster in the same pool.
+ 3. Configure mirroring (peer bootstrap) between two clusters.
+ 4. Enable image mode snapshot based mirroring on the pool respectively.
+ 5. Start running IOs on the primary image.
+ 6. Verify snapshot mirror based performance counter metrics.
 """
 from ceph.rbd.workflows.rbd_mirror_metrics import create_symlink_and_get_metrics
diff --git a/utility/ibm_dns_cleanup.py b/utility/ibm_dns_cleanup.py
index 7cde64f37c1..bb10d56839f 100644
--- a/utility/ibm_dns_cleanup.py
+++ b/utility/ibm_dns_cleanup.py
@@ -1,5 +1,5 @@
 """
- Utility to cleanup orphan DNS record from IBM environment
+Utility to clean up orphan DNS records from the IBM environment
 """
 import math
diff --git a/utility/ibm_volume_cleanup.py b/utility/ibm_volume_cleanup.py
index 4e6d8b9a2c7..ddae7ca087a 100644
--- a/utility/ibm_volume_cleanup.py
+++ b/utility/ibm_volume_cleanup.py
@@ -1,5 +1,5 @@
 """
- Utility to cleanup orphan volumes from IBM environment
+Utility to clean up orphan volumes from the IBM environment
 """
 import sys
diff --git a/utility/psi_remove_vms.py b/utility/psi_remove_vms.py
index e830fd8c764..f1f423d0386 100644
--- a/utility/psi_remove_vms.py
+++ b/utility/psi_remove_vms.py
@@ -1,14 +1,14 @@
 """
- Utility to cleanup instances in RHOS-D environment that have crossed the maximum
- allowed duration and instances that are in error status. There are multiple
- projects/tenants under CephQE purview and each of them have a different configuration.
-
- For example, ceph-jenkins has the least allowable time as the intent is to enable
- the pipeline is executed under a stable environment. The durations for each project
- are
- ceph-jenkins 3 days
- ceph-ci 1 week
- ceph-core 1 weeks
+Utility to clean up instances in the RHOS-D environment that have crossed the maximum
+allowed duration and instances that are in error status. There are multiple
+projects/tenants under CephQE purview and each of them has a different configuration.
+
+For example, ceph-jenkins has the least allowable time as the intent is to ensure
+the pipeline is executed under a stable environment. The durations for each project
+are
+ ceph-jenkins 3 days
+ ceph-ci 1 week
+ ceph-core 1 week
 """
 import smtplib