dbench workflow
Signed-off-by: Manimaran M <[email protected]>
Manimaran-MM committed Jan 29, 2025
1 parent 6910465 commit fe063fb
Showing 2 changed files with 297 additions and 18 deletions.
126 changes: 124 additions & 2 deletions tests/cephfs/cephfs_utilsV1.py
@@ -136,8 +136,9 @@ def prepare_clients(self, clients, build):
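# Add the EPEL repo matching the client's RHEL major version; rpm -E %rhel expands to the release number (e.g. 8 or 9)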
client.node.exec_command(
sudo=True,
cmd=(
"dnf config-manager "
"--add-repo=https://download.fedoraproject.org/pub/epel/9/Everything/x86_64/"
"rhel_version=$(rpm -E %rhel) && "
"dnf config-manager --add-repo="
"https://download.fedoraproject.org/pub/epel/${rhel_version}/Everything/x86_64/"
),
)
client.node.exec_command(
@@ -3230,12 +3231,67 @@ def dbench():
long_running=True,
)

def postgresIO():
log.info("IO tool scheduled : PostgresIO")

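# Default pgbench parameters; each value is picked at random per run unless overridden via postgresIO_params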
io_params = {
"scale": random.choice(
range(
40, 101, 10
)  # The size of the database (test data) increases linearly with the scale factor
),
"workers": random.choice(range(4, 17, 4)),
"clients": random.choice(
range(8, 56, 8)  # Randomly selects 8, 16, 24, ..., 48 clients
),
"duration": random.choice(
range(60, 601, 60)
), # Randomly selects 60, 120, ..., 600 seconds
"testdir_prefix": "postgres_io_dir",
"db_name": "",
}

if kwargs.get("postgresIO_params"):
postgresIO_params = kwargs.get("postgresIO_params")
for io_param in io_params:
if postgresIO_params.get(io_param):
io_params[io_param] = postgresIO_params[io_param]

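# Create a uniquely named working directory for this run under the mount point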
dir_suffix = "".join(
[
random.choice(string.ascii_lowercase + string.digits)
for _ in range(4)
]
)
io_path = f"{mounting_dir}/{io_params['testdir_prefix']}_{dir_suffix}"
client.exec_command(sudo=True, cmd=f"mkdir {io_path}")

# Initialization mode: create and populate the pgbench tables according to the scale factor
client.exec_command(
sudo=True,
cmd=f"pgbench -i --scale={io_params['scale']} -U postgres -d {io_params['db_name']}",
long_running=True,
)

# Run the pgbench benchmark with the chosen number of clients, worker threads, and duration
client.exec_command(
sudo=True,
cmd=(
f"pgbench -c {io_params['clients']} "
f"-j {io_params['workers']} "
f"-T {io_params['duration']} "
f"-U postgres -d {io_params['db_name']}"
),
long_running=True,
)
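# Illustrative rendered commands for one run (values vary per run; db_name comes from postgresIO_params):
#   pgbench -i --scale=80 -U postgres -d <db_name>
#   pgbench -c 24 -j 8 -T 300 -U postgres -d <db_name>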

io_tool_map = {
"dd": dd,
"smallfile": smallfile,
"wget": wget,
"file_extract": file_extract,
"dbench": dbench,
"postgresIO": postgresIO,
}

log.info(f"IO tools planned to run : {io_tools}")
@@ -3545,6 +3601,72 @@ def _setup_crefi(node):
# Setup Crefi pre-requisites : pyxattr
node.exec_command(sudo=True, cmd="pip3 install pyxattr", long_running=True)

def setup_postgresql_IO(self, client, mount_dir, db_name):
"""
Setup Steps:
1. Use the mount dir as the PostgreSQL data directory
2. Set permissions and ownership on the mount dir
3. Initialise the PostgreSQL service
4. Create the DB
"""

log.info("Stopping PostgreSQL")
client.exec_command(sudo=True, cmd="systemctl stop postgresql", check_ec=False)

log.info("Setting up PostgreSQL")
client.exec_command(sudo=True, cmd=f"mkdir -p {mount_dir}")

log.debug(f"Setting up postgres permissions and ownership for the dir {mount_dir}")
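# PostgreSQL requires its data directory to be owned by the postgres user and not world-accessible (hence chmod 700)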
client.exec_command(sudo=True, cmd=f"chown -R postgres:postgres {mount_dir}")
client.exec_command(sudo=True, cmd=f"chmod 700 {mount_dir}")

try:
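# initdb refuses to initialise a non-empty data directory; on failure, clear the mount dir and retry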
log.debug("Initialise the Postgres Service")
client.exec_command(
sudo=True, cmd=f"sudo -u postgres /usr/bin/initdb -D {mount_dir}"
)
except Exception as e:
log.info(
f"Initialising the PostgreSQL service failed: {e}. Applying recovery..."
)
client.exec_command(sudo=True, cmd=f"rm -rf {mount_dir}/*")
client.exec_command(
sudo=True, cmd=f"sudo -u postgres /usr/bin/initdb -D {mount_dir}"
)

config_file = "/usr/lib/systemd/system/postgresql.service"
env_var = f"Environment=PGDATA={mount_dir}"

# Update the postgresql.service unit so PGDATA points to the mount dir: replace the existing Environment line, or append one if it is missing
update_dir_command = (
f"sudo sed -i '/^Environment=PGDATA/c\\{env_var}' {config_file} || "
f"echo '{env_var}' | sudo tee -a {config_file} > /dev/null"
)
client.exec_command(sudo=True, cmd=update_dir_command)

client.exec_command(sudo=True, cmd="systemctl daemon-reload")

client.exec_command(sudo=True, cmd="sestatus")
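# Temporarily set SELinux to permissive so PostgreSQL can start from the new data dir, relabel it with the postgresql_db_t type, then re-enable enforcing mode and restart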
client.exec_command(sudo=True, cmd="setenforce 0")
client.exec_command(sudo=True, cmd="systemctl restart postgresql")
client.exec_command(sudo=True, cmd=f"chcon -R -t postgresql_db_t {mount_dir}")

client.exec_command(sudo=True, cmd="setenforce 1")
client.exec_command(sudo=True, cmd="systemctl restart postgresql")

out, _ = client.exec_command(sudo=True, cmd="systemctl status postgresql")
log.debug(out)
if "active (running)" in out:
log.info("PostgreSQL is running")
else:
log.error("PostgreSQL is not running")

log.info(f"Creating the DB - {db_name}")
client.exec_command(
sudo=True, cmd=f"sudo -u postgres psql -c 'CREATE DATABASE {db_name};'"
)
log.info("DB created successfully")

def generate_all_combinations(
self, client, ioengine, mount_dir, workloads, sizes, iodepth_values, numjobs
):
@@ -74,15 +74,30 @@ def run(ceph_cluster, **kw):
"subvol_name": f"{subvolume_name}_4",
"group_name": subvolume_group_name,
},
{
"vol_name": fs_name,
"subvol_name": f"{subvolume_name}_5",
"group_name": subvolume_group_name,
},
{
"vol_name": fs_name,
"subvol_name": f"{subvolume_name}_6",
"group_name": subvolume_group_name,
},
]
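# Subvolumes 5 and 6 back the new dbench-on-FUSE and PostgreSQL IO steps added below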

# Run the FS lifecycle for 5 iterations
for i in range(1, 6):
log.info(f"Loop {i}: Started file system lifecycle")
log.info(
"\n"
"\n---------------***************---------------"
f"\n Loop {i}: Started file system lifecycle"
"\n---------------***************---------------"
)
try:
fs_util.prepare_clients(clients, build)
fs_util.auth_list(clients)
client1 = clients[0]
client1 = clients[3]

# Creation of FS
fs_util.create_fs(client1, fs_name)
@@ -99,6 +114,12 @@ def run(ceph_cluster, **kw):
log.info(f"Subvolume Info before IO: {subvolume_size_subvol}")

# Mounting Subvolume1 on Fuse
log.info(
"\n"
"\n---------------***************---------------------"
"\n Step 1: Mounting Subvolume1 on Fuse - SmallFile "
"\n---------------***************---------------------"
)
log.info(f"Mount subvolume {subvolume_name}_1 on Fuse Client")
mounting_dir = "".join(
random.choice(string.ascii_lowercase + string.digits)
@@ -129,6 +150,12 @@ def run(ceph_cluster, **kw):
)

# Mounting Subvolume2 on Kernel
log.info(
"\n"
"\n---------------***************-----------------------"
"\n Step 2: Mounting Subvolume2 on Kernel - SmallFile "
"\n---------------***************-----------------------"
)
log.info(f"Mount subvolume {subvolume_name}_2 on Kernel Client")
kernel_mount_dir = f"/mnt/cephfs_kernel{mounting_dir}_1/"
mon_node_ips = fs_util.get_mon_node_ips()
@@ -156,6 +183,12 @@ def run(ceph_cluster, **kw):
)

# Mounting Subvolume3 on nfs
log.info(
"\n"
"\n---------------***************---------------------"
"\n Step 3: Mounting Subvolume3 on NFS - SmallFile "
"\n---------------***************---------------------"
)
log.info(f"Mount subvolume {subvolume_name}_3 on NFS Client")
nfs_servers = ceph_cluster.get_ceph_objects("nfs")
nfs_server = nfs_servers[0].node.hostname
@@ -209,6 +242,12 @@ def run(ceph_cluster, **kw):
)

# Mounting Subvolume4 on Kernel - Run dbench
log.info(
"\n"
"\n---------------***************---------------------"
"\n Step 4: Mounting Subvolume4 on Kernel - Dbench "
"\n---------------***************---------------------"
)
log.info(f"Mount subvolume {subvolume_name}_4 on Kernel Client")
kernel_mount_dir_dbench = f"/mnt/cephfs_kernel{mounting_dir}_4/"
mon_node_ips = fs_util.get_mon_node_ips()
@@ -260,6 +299,123 @@ def run(ceph_cluster, **kw):
fs_health_status = fs_util.get_ceph_health_status(client1)
log.debug(f"Output of FS Health status: {fs_health_status}")

# Mounting Subvolume5 on FUSE - Run dbench
log.info(
"\n"
"\n---------------***************---------------------"
"\n Step 5: Mounting Subvolume5 on Fuse - Dbench "
"\n---------------***************---------------------"
)
log.info(f"Mount subvolume {subvolume_name}_5 on Fuse Client")

fuse_dbench_mount_dir = f"/mnt/cephfs_fuse{mounting_dir}_5/"
subvol_path_fuse, rc = client1.exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {fs_name} {subvolume_name}_5 {subvolume_group_name}",
)
fs_util.fuse_mount(
[client1],
fuse_dbench_mount_dir,
extra_params=f" -r {subvol_path_fuse.strip()} --client_fs {fs_name}",
)

with parallel() as p:
log.info("Start Writing IO on the subvolume using dbench")

p.spawn(
fs_util.run_ios_V1,
client=client1,
mounting_dir=fuse_dbench_mount_dir,
io_tools=["dbench"],
dbench_params={"duration": 40},
)

# Spawn the health monitoring task
p.spawn(
fs_util.monitor_ceph_health,
client=client1,
retry=4,
interval=10, # Set the interval for health checks during parallel operation
)

# Get ceph fs dump output
fs_dump_dict = fs_util.collect_fs_dump_for_validation(client1, fs_name)
log.debug(f"Output of FS dump: {fs_dump_dict}")

# Get ceph fs get of specific volume
fs_get_dict = fs_util.collect_fs_get_for_validation(client1, fs_name)
log.debug(f"Output of FS get: {fs_get_dict}")

# Get ceph fs status
fs_status_dict = fs_util.collect_fs_status_data_for_validation(
client1, fs_name
)
log.debug(f"Output of FS status: {fs_status_dict}")

fs_health_status = fs_util.get_ceph_health_status(client1)
log.debug(f"Output of FS Health status: {fs_health_status}")

# Mounting Subvolume6 on Kernel - Run postgres IO
log.info(
"\n"
"\n---------------***************------------------------"
"\n Step 6: Mounting Subvolume6 on Kernel - PostgresIO "
"\n---------------***************------------------------"
)
log.info(f"Mount subvolume {subvolume_name}_6 on Kernel Client")
kernel_mount_dir_pgsql = f"/mnt/cephfs_kernel{mounting_dir}_6/"
mon_node_ips = fs_util.get_mon_node_ips()
subvol_path_kernel, rc = client1.exec_command(
sudo=True,
cmd=f"ceph fs subvolume getpath {fs_name} {subvolume_name}_6 {subvolume_group_name}",
)
fs_util.kernel_mount(
[client1],
kernel_mount_dir_pgsql,
",".join(mon_node_ips),
sub_dir=f"{subvol_path_kernel.strip()}",
extra_params=f",fs={fs_name}",
)

db_name = fs_name + "_db_kernel"
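# Use the kernel-mounted subvolume as PostgreSQL's data directory and create a dedicated DB for pgbench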
fs_util.setup_postgresql_IO(client1, kernel_mount_dir_pgsql, db_name)

with parallel() as p:
log.info("Start Writing IO on the subvolume using postgresql")

p.spawn(
fs_util.run_ios_V1,
client=client1,
mounting_dir=kernel_mount_dir_pgsql,
io_tools=["postgresIO"],
postgresIO_params={"duration": 10, "db_name": db_name},
)

# Spawn the health monitoring task
p.spawn(
fs_util.monitor_ceph_health,
client=client1,
retry=2,
interval=5, # Set the interval for health checks during parallel operation
)

# Get ceph fs dump output
fs_dump_dict = fs_util.collect_fs_dump_for_validation(client1, fs_name)
log.debug(f"Output of FS dump: {fs_dump_dict}")

# Get ceph fs get of specific volume
fs_get_dict = fs_util.collect_fs_get_for_validation(client1, fs_name)
log.debug(f"Output of FS get: {fs_get_dict}")

# Get ceph fs status
fs_status_dict = fs_util.collect_fs_status_data_for_validation(
client1, fs_name
)
log.debug(f"Output of FS status: {fs_status_dict}")

fs_health_status = fs_util.get_ceph_health_status(client1)
log.debug(f"Output of FS Health status: {fs_health_status}")

for subvolume in subvolume_list:
subvolume_size_subvol = fs_util.get_subvolume_info(
client=client1, **subvolume
@@ -272,21 +428,22 @@ def run(ceph_cluster, **kw):
return 1

finally:
fs_util.client_clean_up(
"umount", fuse_clients=[client1], mounting_dir=fuse_mount_dir
)

fs_util.client_clean_up(
"umount",
kernel_clients=[client1],
mounting_dir=kernel_mount_dir,
)
for mount_dir in [
fuse_mount_dir,
fuse_dbench_mount_dir,
]:
fs_util.client_clean_up(
"umount", fuse_clients=[client1], mounting_dir=mount_dir
)

fs_util.client_clean_up(
"umount",
kernel_clients=[client1],
mounting_dir=kernel_mount_dir_dbench,
)
for mount_dir in [
kernel_mount_dir,
kernel_mount_dir_dbench,
kernel_mount_dir_pgsql,
]:
fs_util.client_clean_up(
"umount", kernel_clients=[client1], mounting_dir=mount_dir
)

client1.exec_command(sudo=True, cmd=f"rm -rf {nfs_mounting_dir}/*")
client1.exec_command(sudo=True, cmd=f"umount {nfs_mounting_dir}")
