diff --git a/tests/api2/test_011_user.py b/tests/api2/test_011_user.py
deleted file mode 100644
index 14eb8fe3cedf0..0000000000000
--- a/tests/api2/test_011_user.py
+++ /dev/null
@@ -1,694 +0,0 @@
-import contextlib
-import dataclasses
-import os
-import time
-import stat
-
-import pytest
-from pytest_dependency import depends
-
-from truenas_api_client import ClientException
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user as user_asset
-from middlewared.test.integration.assets.pool import dataset as dataset_asset
-from middlewared.test.integration.utils import call, ssh
-
-from functions import SSH_TEST, wait_on_job
-from auto_config import pool_name, password, user
-SHELL = '/usr/bin/bash'
-VAR_EMPTY = '/var/empty'
-ROOT_GROUP = 'root'
-DEFAULT_HOMEDIR_OCTAL = 0o40700
-SMB_CONFIGURED_SENTINEL = '/var/run/samba/.configured'
-
-
-@dataclasses.dataclass
-class HomeAssets:
-    HOME_FILES = {
-        'depends_name': '',
-        'files': {
-            '~/': oct(DEFAULT_HOMEDIR_OCTAL),
-            '~/.profile': '0o100644',
-            '~/.ssh': '0o40700',
-            '~/.ssh/authorized_keys': '0o100600',
-        }
-    }
-    Dataset01 = {
-        'depends_name': 'HOME_DS_CREATED',
-        'create_payload': {
-            'name': f'{pool_name}/test_homes',
-            'share_type': 'SMB',
-            'acltype': 'NFSV4',
-            'aclmode': 'RESTRICTED'
-        },
-        'home_acl': [
-            {
-                "tag": "owner@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "FULL_CONTROL"},
-                "flags": {"BASIC": "INHERIT"}
-            },
-            {
-                "tag": "group@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "FULL_CONTROL"},
-                "flags": {"BASIC": "INHERIT"}
-            },
-            {
-                "tag": "everyone@",
-                "id": None,
-                "type": "ALLOW",
-                "perms": {"BASIC": "TRAVERSE"},
-                "flags": {"BASIC": "NOINHERIT"}
-            },
-        ],
-        'new_home': 'new_home',
-    }
-
-
-@dataclasses.dataclass
-class UserAssets:
-    TestUser01 = {
-        'depends_name': 'user_01',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'testuser',
-            'full_name': 'Test User',
-            'group_create': True,
-            'password': 'test1234',
-            'uid': None,
-            'smb': False,
-            'shell': SHELL
-        }
-    }
-    TestUser02 = {
-        'depends_name': 'user_02',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'testuser2',
-            'full_name': 'Test User2',
-            'group_create': True,
-            'password': 'test1234',
-            'uid': None,
-            'shell': SHELL,
-            'sshpubkey': 'canary',
-            'home': f'/mnt/{HomeAssets.Dataset01["create_payload"]["name"]}',
-            'home_mode': f'{stat.S_IMODE(DEFAULT_HOMEDIR_OCTAL):03o}',
-            'home_create': True,
-        },
-        'filename': 'testfile_01',
-    }
-    ShareUser01 = {
-        'depends_name': 'share_user_01',
-        'query_response': dict(),
-        'get_user_obj_response': dict(),
-        'create_payload': {
-            'username': 'shareuser',
-            'full_name': 'Share User',
-            'group_create': True,
-            'groups': [],
-            'password': 'testing',
-            'uid': None,
-            'shell': SHELL
-        }
-    }
-
-
-def check_config_file(file_name, expected_line):
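-    # cat the file over SSH and assert that expected_line appears verbatim in it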
-    results = SSH_TEST(f'cat {file_name}', user, password)
-    assert results['result'], results['output']
-    assert expected_line in results['stdout'].splitlines(), results['output']
-
-
-@contextlib.contextmanager
-def create_user_with_dataset(ds_info, user_info):
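-    # creates the dataset, optionally roots the user's home directory inside it,
-    # and guarantees the user (and its primary group) is deleted on exit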
-    with dataset_asset(ds_info['name'], ds_info.get('options', []), **ds_info.get('kwargs', {})) as ds:
-        if 'path' in user_info:
-            user_info['payload']['home'] = os.path.join(f'/mnt/{ds}', user_info['path'])
-
-        user_id = None
-        try:
-            user_id = call('user.create', user_info['payload'])
-            yield call('user.query', [['id', '=', user_id]], {'get': True})
-        finally:
-            if user_id is not None:
-                call('user.delete', user_id, {"delete_group": True})
-
-
-@pytest.mark.dependency(name=UserAssets.TestUser01['depends_name'])
-def test_001_create_and_verify_testuser():
-    """
-    Test for basic user creation. In this case 'smb' is disabled to bypass
-    passdb-related code. This is because the passdb add relies on users existing
-    in the passwd database, and errors during user creation will get masked as
-    passdb errors.
-    """
-    UserAssets.TestUser01['create_payload']['uid'] = call('user.get_next_uid')
-    call('user.create', UserAssets.TestUser01['create_payload'])
-    username = UserAssets.TestUser01['create_payload']['username']
-    qry = call(
-        'user.query',
-        [['username', '=', username]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    UserAssets.TestUser01['query_response'].update(qry)
-
-    # verify basic info
-    for key in ('username', 'full_name', 'shell'):
-        assert qry[key] == UserAssets.TestUser01['create_payload'][key]
-
-    # verify various /etc files were updated
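-    # shadow(5) fields are name:hash:lastchg:min:max:warn:inactive:expire:
-    # (18397 is the hard-coded lastchg value, in days since the epoch, that
-    # these assertions expect)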
-    for f in (
-        {
-            'file': '/etc/shadow',
-            'value': f'{username}:{qry["unixhash"]}:18397:0:99999:7:::'
-        },
-        {
-            'file': '/etc/passwd',
-            'value': f'{username}:x:{qry["uid"]}:{qry["group"]["bsdgrp_gid"]}:{qry["full_name"]}:{qry["home"]}:{qry["shell"]}'
-        },
-        {
-            'file': '/etc/group',
-            'value': f'{qry["group"]["bsdgrp_group"]}:x:{qry["group"]["bsdgrp_gid"]}:'
-        }
-    ):
-        check_config_file(f['file'], f['value'])
-
-    # verify password doesn't leak to middlewared.log
-    # we do this inside the create and verify function
-    # because this is a severe enough problem that we should
-    # just "fail" at this step so it sets off a bunch of
-    # red flags in the CI
-    results = SSH_TEST(
-        f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-    # non-smb users shouldn't show up in smb's passdb
-    assert qry['sid'] is None
-
-
-def test_002_verify_user_exists_in_pwd(request):
-    """
-    get_user_obj is a wrapper around the pwd module.
-    This check verifies that the user is _actually_ created.
-    """
-    depends(request, [UserAssets.TestUser01['depends_name']])
-    pw = call(
-        'user.get_user_obj',
-        {'username': UserAssets.TestUser01['create_payload']['username'], 'sid_info': True}
-    )
-    UserAssets.TestUser01['get_user_obj_response'].update(pw)
-
-    # Verify pwd info
-    assert pw['pw_uid'] == UserAssets.TestUser01['query_response']['uid']
-    assert pw['pw_shell'] == UserAssets.TestUser01['query_response']['shell']
-    assert pw['pw_gecos'] == UserAssets.TestUser01['query_response']['full_name']
-    assert pw['pw_dir'] == VAR_EMPTY
-
-    # At this point, we're not an SMB user
-    assert pw['sid'] is None
-    assert pw['source'] == 'LOCAL'
-    assert pw['local'] is True
-
-
-def test_003_get_next_uid_again(request):
-    """user.get_next_uid should always return a unique uid"""
-    depends(request, [UserAssets.TestUser01['depends_name']])
-    assert call('user.get_next_uid') != UserAssets.TestUser01['create_payload']['uid']
-
-
-def test_004_update_and_verify_user_groups(request):
-    """Add the user to the root users group"""
-    depends(request, [UserAssets.TestUser01['depends_name']])
-    root_group_info = call(
-        'group.query', [['group', '=', ROOT_GROUP]], {'get': True}
-    )
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'groups': [root_group_info['id']]}
-    )
-
-    grouplist = call(
-        'user.get_user_obj',
-        {'username': UserAssets.TestUser01['create_payload']['username'], 'get_groups': True}
-    )['grouplist']
-    assert root_group_info['gid'] in grouplist
-
-
-@pytest.mark.dependency(name='SMB_CONVERT')
-def test_005_convert_non_smbuser_to_smbuser(request):
-    depends(request, [UserAssets.TestUser01['depends_name']])
-    with pytest.raises(ValidationErrors):
-        """
-        SMB auth for local users relies on a stored NT hash. We only generate this hash
-        for SMB users. This means that converting from non-SMB to SMB requires
-        re-submitting the password so that we can generate the required hash. If the
-        payload is submitted without a password, then a validation error _must_ be raised.
-        """
-        call('user.update', UserAssets.TestUser01['query_response']['id'], {'smb': True})
-
-    rv = call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'smb': True, 'password': UserAssets.TestUser01['create_payload']['password']}
-    )
-    assert rv
-    # TODO: why sleep here?
-    time.sleep(2)
-
-    # verify converted smb user doesn't leak password
-    results = SSH_TEST(
-        f'grep -R {UserAssets.TestUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-
-def test_006_verify_converted_smbuser_passdb_entry_exists(request):
-    """
-    At this point the non-SMB user has been converted to an SMB user. Verify
-    that a passdb entry was appropriately generated.
-    """
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser01['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    assert qry
-    assert qry['sid']
-
-
-def test_007_add_smbuser_to_sudoers(request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    username = UserAssets.TestUser01['create_payload']['username']
-    # all sudo commands
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': ['ALL'], 'sudo_commands_nopasswd': []}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) ALL")
-
-    # all sudo commands no password
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': [], 'sudo_commands_nopasswd': ['ALL']}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) NOPASSWD: ALL")
-
-    # all sudo commands and all sudo commands no password
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'sudo_commands': ['ALL'], 'sudo_commands_nopasswd': ['ALL']}
-    )
-    check_config_file('/etc/sudoers', f"{username} ALL=(ALL) ALL, NOPASSWD: ALL")
-
-
-def test_008_disable_smb_and_password(request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    username = UserAssets.TestUser01['create_payload']['username']
-    call(
-        'user.update',
-        UserAssets.TestUser01['query_response']['id'],
-        {'password_disabled': True, 'smb': False}
-    )
-    check_config_file('/etc/shadow', f'{username}:*:18397:0:99999:7:::')
-
-
-@pytest.mark.parametrize('username', [UserAssets.TestUser01['create_payload']['username']])
-def test_009_delete_user(username, request):
-    depends(request, ['SMB_CONVERT', UserAssets.TestUser01['depends_name']])
-    # delete the user first
-    call(
-        'user.delete',
-        UserAssets.TestUser01['query_response']['id'],
-        {'delete_group': True}
-    )
-    assert not call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser01['query_response']['username']]]
-    )
-
-
-# FIXME: why is this being called here randomly in the middle of this test? And why are we using REST?
-# def test_25_has_local_administrator_set_up(request):
-    # depends(request, ["user_02", "user_01"])
-    # assert GET('/user/has_local_administrator_set_up/', anonymous=True).json() is True
-
-
-@pytest.mark.dependency(name=UserAssets.ShareUser01['depends_name'])
-def test_020_create_and_verify_shareuser():
-    UserAssets.ShareUser01['create_payload']['uid'] = call('user.get_next_uid')
-    UserAssets.ShareUser01['create_payload']['groups'].append(
-        call('group.query', [['group', '=', ROOT_GROUP]], {'get': True})['id']
-    )
-
-    call('user.create', UserAssets.ShareUser01['create_payload'])
-    qry = call('user.query', [['username', '=', UserAssets.ShareUser01['create_payload']['username']]], {'get': True})
-    UserAssets.ShareUser01['query_response'].update(qry)
-
-    # verify basic info
-    for key in ('username', 'full_name', 'shell'):
-        assert qry[key] == UserAssets.ShareUser01['create_payload'][key]
-
-    # verify password doesn't leak to middlewared.log
-    # we do this inside the create and verify function
-    # because this is a severe enough problem that we should
-    # just "fail" at this step so it sets off a bunch of
-    # red flags in the CI
-    results = SSH_TEST(
-        f'grep -R {UserAssets.ShareUser01["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-
-@pytest.mark.dependency(name=UserAssets.TestUser02['depends_name'])
-def test_031_create_user_with_homedir(request):
-    """Create a zfs dataset to be used as a home directory for a
-    local user. The SMB share_type is selected for the dataset so
-    that we can verify the ACL is properly stripped from the
-    newly-created home directory."""
-    # create the dataset
-    call('pool.dataset.create', HomeAssets.Dataset01['create_payload'])
-    call('filesystem.setacl', {
-        'path': os.path.join('/mnt', HomeAssets.Dataset01['create_payload']['name']),
-        'dacl': HomeAssets.Dataset01['home_acl']
-    }, job=True)
-
-    # now create the user
-    UserAssets.TestUser02['create_payload']['uid'] = call('user.get_next_uid')
-    call('user.create', UserAssets.TestUser02['create_payload'])
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    UserAssets.TestUser02['query_response'].update(qry)
-
-    # verify basic info
-    for key in ('username', 'full_name', 'shell'):
-        assert qry[key] == UserAssets.TestUser02['create_payload'][key]
-
-    # verify password doesn't leak to middlewared.log
-    # we do this here because this is a severe enough
-    # problem that we should just "fail" at this step
-    # so it sets off a bunch of red flags in the CI
-    results = SSH_TEST(
-        f'grep -R {UserAssets.TestUser02["create_payload"]["password"]!r} /var/log/middlewared.log',
-        user, password
-    )
-    assert results['result'] is False, str(results['output'])
-
-    pw = call(
-        'user.get_user_obj',
-        {'username': UserAssets.TestUser02['create_payload']['username'], 'sid_info': True}
-    )
-    UserAssets.TestUser02['get_user_obj_response'].update(pw)
-
-    # verify pwd
-    assert pw['pw_dir'] == os.path.join(
-        UserAssets.TestUser02['create_payload']['home'], UserAssets.TestUser02['create_payload']['username']
-    )
-    assert pw['pw_name'] == UserAssets.TestUser02['query_response']['username']
-    assert pw['pw_uid'] == UserAssets.TestUser02['query_response']['uid']
-    assert pw['pw_shell'] == UserAssets.TestUser02['query_response']['shell']
-    assert pw['pw_gecos'] == UserAssets.TestUser02['query_response']['full_name']
-    assert pw['sid'] is not None
-    assert pw['source'] == 'LOCAL'
-    assert pw['local'] is True
-
-    # verify smb user passdb entry
-    assert qry['sid']
-
-    # verify homedir acl is stripped
-    st_info = call('filesystem.stat', UserAssets.TestUser02['query_response']['home'])
-    assert st_info['acl'] is False
-
-
-def test_035_check_file_perms_in_homedir(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-    home_path = UserAssets.TestUser02['query_response']['home']
-    for file, mode in HomeAssets.HOME_FILES['files'].items():
-        st_info = call('filesystem.stat', os.path.join(home_path, file.removeprefix('~/')))
-        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-
-def test_036_create_testfile_in_homedir(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-    filename = UserAssets.TestUser02['filename']
-    filepath = f'{UserAssets.TestUser02["query_response"]["home"]}/{filename}'
-    results = SSH_TEST(
-        f'touch {filepath}; chown {UserAssets.TestUser01["query_response"]["uid"]} {filepath}',
-        user, password
-    )
-    assert results['result'] is True, results['output']
-    assert call('filesystem.stat', filepath)
-
-
-@pytest.mark.dependency(name="HOMEDIR2_EXISTS")
-def test_037_move_homedir_to_new_directory(request):
-    depends(request, [UserAssets.TestUser02['depends_name']])
-
-    # Validate autocreation of the homedir during a path update
-    with dataset_asset('temp_dataset_for_home') as ds:
-        new_home = os.path.join('/mnt', ds)
-        call(
-            'user.update',
-            UserAssets.TestUser02['query_response']['id'],
-            {'home': new_home, 'home_create': True}
-        )
-
-        filters = [['method', '=', 'user.do_home_copy']]
-        opts = {'get': True, 'order_by': ['-id']}
-        move_job_timeout = 300  # 5 mins
-        move_job1 = call('core.get_jobs', filters, opts)
-        assert move_job1
-        rv = wait_on_job(move_job1['id'], move_job_timeout)
-        assert rv['state'] == 'SUCCESS', f'JOB: {move_job1!r}, RESULT: {str(rv["results"])}'
-
-        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-        # now move the home directory once more, this time to the root of our pool
-        new_home = os.path.join('/mnt', pool_name)
-        call(
-            'user.update',
-            UserAssets.TestUser02['query_response']['id'],
-            {'home': new_home, 'home_create': True}
-        )
-
-        move_job2 = call('core.get_jobs', filters, opts)
-        assert move_job2
-        assert move_job1['id'] != move_job2['id']
-        rv = wait_on_job(move_job2['id'], move_job_timeout)
-        assert rv['state'] == 'SUCCESS', f'JOB: {move_job2!r}, RESULT: {str(rv["results"])}'
-
-        st_info = call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['create_payload']['username']))
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-
-def test_038_change_homedir_to_existing_path(request):
-    depends(request, [UserAssets.ShareUser01['depends_name'], UserAssets.TestUser01['depends_name']])
-    # Manually create a new home dir
-    new_home = os.path.join(
-        '/mnt',
-        HomeAssets.Dataset01['create_payload']['name'],
-        HomeAssets.Dataset01['new_home']
-    )
-    results = SSH_TEST(f'mkdir {new_home}', user, password)
-    assert results['result'] is True, results['output']
-
-    # Move the homedir to existing dir
-    call(
-        'user.update',
-        UserAssets.TestUser02['query_response']['id'],
-        {'home': new_home}
-    )
-    filters = [['method', '=', 'user.do_home_copy']]
-    opts = {'get': True, 'order_by': ['-id']}
-    move_job_timeout = 300  # 5 mins
-    home_move_job = call('core.get_jobs', filters, opts)
-    rv = wait_on_job(home_move_job['id'], move_job_timeout)
-    assert rv['state'] == 'SUCCESS', str(rv['results'])
-
-    # verify files in the homedir that were moved are what we expect
-    for file, mode in HomeAssets.HOME_FILES['files'].items():
-        st_info = call('filesystem.stat', os.path.join(new_home, file.removeprefix("~/")))
-        assert oct(st_info['mode']) == mode, f"{file}: {st_info}"
-        assert st_info['uid'] == UserAssets.TestUser02['query_response']['uid']
-
-    # verify the specific file that existed in the previous homedir location was moved over
-    # NOTE: this file was created in test_036
-    assert call('filesystem.stat', os.path.join(new_home, UserAssets.TestUser02['filename']))
-
-
-def test_041_lock_smb_user(request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'locked': True})
-    username = UserAssets.TestUser02['create_payload']['username']
-    check_config_file('/etc/shadow', f'{username}:!:18397:0:99999:7:::')
-
-    my_entry = call('smb.passdb_list', [['username', '=', username]], {'get': True})
-    assert my_entry['acct_ctrl'] & 0x00000400, str(my_entry)  # 0x00000400 is AUTO_LOCKED in MS-SAMR
-
-
-def test_042_disable_smb_user(request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    assert call('user.update', UserAssets.TestUser02['query_response']['id'], {'smb': False})
-    qry = call(
-        'user.query',
-        [['username', '=', UserAssets.TestUser02['create_payload']['username']]],
-        {'get': True, 'extra': {'additional_information': ['SMB']}}
-    )
-    assert qry
-    assert qry['sid'] is None
-
-
-def test_043_raise_validation_error_on_homedir_collision(request):
-    """
-    Verify that a validation error is raised if the homedir collides with an existing one.
-    """
-    depends(request, ['HOMEDIR2_EXISTS', UserAssets.TestUser02['depends_name']], scope='session')
-    # NOTE: this was used in test_038
-    existing_home = os.path.join(
-        '/mnt',
-        HomeAssets.Dataset01['create_payload']['name'],
-        HomeAssets.Dataset01['new_home']
-    )
-    with pytest.raises(ValidationErrors):
-        call(
-            'user.update',
-            UserAssets.ShareUser01['query_response']['id'],
-            {'home': existing_home}
-        )
-
-
-@pytest.mark.parametrize('username', [UserAssets.TestUser02['create_payload']['username']])
-def test_046_delete_homedir_user(username, request):
-    depends(request, [UserAssets.TestUser02['depends_name']], scope='session')
-    # delete user first
-    assert call(
-        'user.delete',
-        UserAssets.TestUser02['query_response']['id']
-    )
-
-    # now clean up the dataset that was used as the homedir
-    assert call(
-        'pool.dataset.delete',
-        UserAssets.TestUser02['create_payload']['home'].removeprefix('/mnt/')
-    )
-
-
-def test_050_verify_no_builtin_smb_users(request):
-    """
-    We have builtin SMB groups, but there should be no builtin
-    SMB users. Failure here may indicate an issue with the builtin user
-    synchronization code in middleware. Failure to catch this
-    may lead to accidentally granting SMB access to builtin
-    accounts.
-    """
-    qry = call('user.query', [['builtin', '=', True], ['smb', '=', True]], {'count': True})
-    assert qry == 0
-
-
-def test_058_create_new_user_knownfails(request):
-    """
-    Specifying an existing path without home_create should
-    succeed and set the mode to the desired value.
-    """
-    ds = {'pool': pool_name, 'name': 'user_test_existing_home_path'}
-    user_info = {
-        'username': 't1',
-        "full_name": 'T1',
-        'group_create': True,
-        'password': 'test1234',
-        'home_mode': '770'
-    }
-    with create_user_with_dataset(ds, {'payload': user_info, 'path': ''}) as user:
-        results = call('filesystem.stat', user['home'])
-        assert results['acl'] is False
-        assert f'{stat.S_IMODE(results["mode"]):03o}' == '770'
-
-        user2 = {
-            'username': 't2',
-            'full_name': 't2',
-            'group_create': True,
-            'password': 'test1234',
-            'home': user['home']
-        }
-        with pytest.raises(ValidationErrors):
-            # Attempting to repeat the same with new user should
-            # fail (no users may share same home path)
-            call('user.create', user2)
-
-        with pytest.raises(ValidationErrors):
-            # Attempting to put homedir in subdirectory of existing homedir
-            # should also raise a validation error
-            user2.update({'home_create': True})
-            call('user.create', user2)
-
-        with pytest.raises(ValidationErrors):
-            # Attempting to create a user with a non-existent path
-            user2.update({'home': os.path.join(user2['home'], 'canary')})
-            call('user.create', user2)
-
-
-def test_059_create_user_ro_dataset(request):
-    with dataset_asset('ro_user_ds', {'readonly': 'ON'}) as ds:
-        with pytest.raises(ValidationErrors):
-            call('user.create', {
-                'username': 't1',
-                'full_name': 'T1',
-                'group_create': True,
-                'password': 'test1234',
-                'home_mode': '770',
-                'home_create': True,
-                'home': f'/mnt/{ds}'
-            })
-
-
-def test_060_immutable_user_validation(request):
-    # the `news` user is immutable
-    immutable_id = call('user.query', [['username', '=', 'news']], {'get': True})['id']
-    to_validate = [
-        {'group': 1},
-        {'home': '/mnt/tank', 'home_create': True},
-        {'smb': True},
-        {'username': 'no_way_bad'},
-    ]
-    for i in to_validate:
-        with pytest.raises(ValidationErrors) as ve:
-            call('user.update', immutable_id, i)
-        assert ve.value.errors[0].errmsg == 'This attribute cannot be changed'
-
-
-@contextlib.contextmanager
-def toggle_smb_configured():
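-    # removing the sentinel file simulates an SMB service that has not yet been
-    # configured; smb.set_configured restores the flag when the context exits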
-    ssh(f'rm {SMB_CONFIGURED_SENTINEL}')
-    assert call('smb.is_configured') is False
-    try:
-        yield
-    finally:
-        call('smb.set_configured')
-
-
-def test_099_cleanup_share_user():
-    # a later test asserts that no smb accounts exist by the time it
-    # runs, so clean up this account
-    call('user.delete', UserAssets.ShareUser01['query_response']['id'])
diff --git a/tests/api2/test_012_directory_service_ssh.py b/tests/api2/test_012_directory_service_ssh.py
deleted file mode 100644
index d95c26cb45078..0000000000000
--- a/tests/api2/test_012_directory_service_ssh.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import pytest
-from functions import SSH_TEST
-
-from middlewared.test.integration.assets.directory_service import active_directory, ldap
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-try:
-    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME
-except ImportError:
-    Reason = 'ADNameServer, AD_DOMAIN, ADPASSWORD, and/or ADUSERNAME are missing in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-try:
-    from config import (
-        LDAPUSER,
-        LDAPPASSWORD
-    )
-except ImportError:
-    Reason = 'LDAP* variables are not set up in config.py'
-    pytestmark = pytest.mark.skipif(True, reason=Reason)
-
-
-@pytest.fixture(scope="function")
-def do_ad_connection(request):
-    with active_directory() as ad:
-        yield ad
-
-
-@pytest.fixture(scope="function")
-def do_ldap_connection(request):
-    with ldap() as ldap_conn:
-        yield ldap_conn
-
-
-def test_08_test_ssh_ad(do_ad_connection):
-    userobj = do_ad_connection['user_obj']
-    groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
-
-    payload = {"password_login_groups": [groupobj['gr_name']]}
-
-    try:
-        with expect_audit_method_calls([{
-            'method': 'ssh.update',
-            'params': [payload],
-            'description': 'Update SSH configuration'
-        }]):
-            call('ssh.update', payload)
-
-        results = SSH_TEST('ls -la', f'{ADUSERNAME}@{AD_DOMAIN}', ADPASSWORD)
-    finally:
-        call('ssh.update', {"password_login_groups": []})
-
-    assert results['result'] is True, results
-
-
-def test_09_test_ssh_ldap(do_ldap_connection):
-    userobj = call('user.get_user_obj', {'username': LDAPUSER})
-    groupobj = call('group.get_group_obj', {'gid': userobj['pw_gid']})
-    call('ssh.update', {"password_login_groups": [groupobj['gr_name']]})
-    cmd = 'ls -la'
-    results = SSH_TEST(cmd, LDAPUSER, LDAPPASSWORD)
-    call('ssh.update', {"password_login_groups": []})
-    assert results['result'] is True, results
diff --git a/tests/api2/test_014_failover_related.py b/tests/api2/test_014_failover_related.py
deleted file mode 100644
index 9f1c12f758fad..0000000000000
--- a/tests/api2/test_014_failover_related.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import errno
-
-import pytest
-from pytest_dependency import depends
-
-from functions import SSH_TEST
-from auto_config import ha, user, password
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, client
-
-
-@pytest.fixture(scope='module')
-def readonly_admin():
-    # READONLY role implies FAILOVER_READ
-    with unprivileged_user(
-        username='failover_guy',
-        group_name='failover_admins',
-        privilege_name='FAILOVER_PRIV',
-        allowlist=[],
-        web_shell=False,
-        roles=['READONLY_ADMIN']
-    ) as acct:
-        yield acct
-
-
-@pytest.mark.dependency(name='hactl_install_dir')
-def test_01_check_hactl_installed(request):
-    rv = SSH_TEST('which hactl', user, password)
-    assert rv['stdout'].strip() == '/usr/local/sbin/hactl', rv['output']
-
-
-@pytest.mark.dependency(name='hactl_status')
-def test_02_check_hactl_status(request):
-    depends(request, ['hactl_install_dir'])
-    rv = SSH_TEST('hactl', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        for i in ('Node status:', 'This node serial:', 'Other node serial:', 'Failover status:'):
-            assert i in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-@pytest.mark.dependency(name='hactl_takeover')
-def test_03_check_hactl_takeover(request):
-    # integration tests run against the master node (at least they should...)
-    depends(request, ['hactl_status'])
-    rv = SSH_TEST('hactl takeover', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'This command can only be run on the standby node.' in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-@pytest.mark.dependency(name='hactl_enable')
-def test_04_check_hactl_enable(request):
-    # integration tests run against the master node (at least they should...)
-    depends(request, ['hactl_takeover'])
-    rv = SSH_TEST('hactl enable', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'Failover already enabled.' in output, output
-    else:
-        assert 'Not an HA node' in output, output
-
-
-def test_05_check_hactl_disable(request):
-    depends(request, ['hactl_enable'])
-    rv = SSH_TEST('hactl disable', user, password)
-    output = rv['stdout'].strip()
-    if ha:
-        assert 'Failover disabled.' in output, output
-        assert call('failover.config')['disabled'] is True
-        rv = SSH_TEST('hactl enable', user, password)
-        output = rv['stdout'].strip()
-        assert 'Failover enabled.' in output, output
-        assert call('failover.config')['disabled'] is False
-    else:
-        assert 'Not an HA node' in output, output
-
-
-if ha:
-    def test_07_failover_replicate():
-        old_ns = call('network.configuration.config')['nameserver3']
-        new_ns = '1.1.1.1'
-        try:
-            call('network.configuration.update', {'nameserver3': new_ns})
-
-            remote = call('failover.call_remote', 'network.configuration.config')
-            assert remote['nameserver3'] == new_ns
-            assert remote['state']['nameserver3'] == new_ns
-        finally:
-            call('network.configuration.update', {'nameserver3': old_ns})
-            remote = call('failover.call_remote', 'network.configuration.config')
-            assert remote['nameserver3'] == old_ns
-            assert remote['state']['nameserver3'] == old_ns
-
-    def test_08_readonly_ops(request, readonly_admin):
-        with client(auth=(readonly_admin.username, readonly_admin.password)) as c:
-            c.call('failover.config')
-            c.call('failover.node')
-            with pytest.raises(CallError) as ce:
-                c.call('failover.call_remote', 'user.update')
-
-            assert ce.value.errno == errno.EACCES
diff --git a/tests/api2/test_015_services.py b/tests/api2/test_015_services.py
deleted file mode 100644
index 0fab84e856923..0000000000000
--- a/tests/api2/test_015_services.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import time
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.utils import call, ssh
-
-def test_001_oom_check():
-    pid = call('core.get_pid')
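-    # an oom_score_adj of -1000 exempts the middleware process from the kernel
-    # OOM killer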
-    assert call('core.get_oom_score_adj', pid) == -1000
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)  # Sometimes systemd unit state is erroneously reported as active
-def test_non_silent_service_start_failure():
-    """
-    This tests for 2 conditions:
-        1. middleware raises a CallError that isn't empty
-        2. each time a CallError is raised, the message
-            has a timestamp and that timestamp changes
-            with each failure
-    """
-    with pytest.raises(CallError) as e:
-        call('service.start', 'ups', {'silent': False})
-
-    # Error looks like
-    """
-    middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:14 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
-    Jan 10 08:49:14 nut-monitor[3032658]: fopen /run/nut/upsmon.pid: No such file or directory
-    Jan 10 08:49:14 nut-monitor[3032658]: Unable to use old-style MONITOR line without a username
-    Jan 10 08:49:14 nut-monitor[3032658]: Convert it and add a username to upsd.users - see the documentation
-    Jan 10 08:49:14 nut-monitor[3032658]: Fatal error: unusable configuration
-    Jan 10 08:49:14 nut-monitor[3032658]: Network UPS Tools upsmon 2.7.4
-    Jan 10 08:49:14 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
-    Jan 10 08:49:14 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
-    Jan 10 08:49:14 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
-    """
-    lines1 = e.value.errmsg.splitlines()
-    first_ts, len_lines1 = ' '.join(lines1.pop(0).split()[:3]), len(lines1)
-    assert any('nut-monitor[' in line for line in lines1), lines1
-    assert any('systemd[' in line for line in lines1), lines1
-
-    # make sure we don't trigger system StartLimitBurst threshold
-    # by removing this service from failed unit list (if it's there)
-    ssh('systemctl reset-failed nut-monitor')
-
-    # we have to sleep 1 second here, otherwise the timestamp will be
-    # the same as when we first tried to start the service; the whole
-    # point is to verify that the error message stays up to date with
-    # reality
-    time.sleep(1)
-
-    with pytest.raises(CallError) as e:
-        call('service.start', 'ups', {'silent': False})
-
-    # Error looks like (notice the timestamp change, which is what we verify):
-    """
-    middlewared.service_exception.CallError: [EFAULT] Jan 10 08:49:15 systemd[1]: Starting Network UPS Tools - power device monitor and shutdown controller...
-    Jan 10 08:49:15 nut-monitor[3032739]: fopen /run/nut/upsmon.pid: No such file or directory
-    Jan 10 08:49:15 nut-monitor[3032739]: Unable to use old-style MONITOR line without a username
-    Jan 10 08:49:15 nut-monitor[3032739]: Convert it and add a username to upsd.users - see the documentation
-    Jan 10 08:49:15 nut-monitor[3032739]: Fatal error: unusable configuration
-    Jan 10 08:49:15 nut-monitor[3032739]: Network UPS Tools upsmon 2.7.4
-    Jan 10 08:49:15 systemd[1]: nut-monitor.service: Control process exited, code=exited, status=1/FAILURE
-    Jan 10 08:49:15 systemd[1]: nut-monitor.service: Failed with result 'exit-code'.
-    Jan 10 08:49:15 systemd[1]: Failed to start Network UPS Tools - power device monitor and shutdown controller.
-    """
-    lines2 = e.value.errmsg.splitlines()
-    second_ts, len_lines2 = ' '.join(lines2.pop(0).split()[:3]), len(lines2)
-    assert any('nut-monitor[' in line for line in lines2), lines2
-    assert any('systemd[' in line for line in lines2), lines2
-
-    # timestamp should change since we sleep(1)
-    assert first_ts != second_ts
-
-    # the error messages will differ slightly (different PID for upsmon) but the number
-    # of lines should be the same
-    assert len_lines1 == len_lines2
-
-    # Stop the service to avoid syslog spam
-    call('service.stop', 'ups')
diff --git a/tests/api2/test_030_activedirectory.py b/tests/api2/test_030_activedirectory.py
deleted file mode 100644
index 3f35c9d1ea9a4..0000000000000
--- a/tests/api2/test_030_activedirectory.py
+++ /dev/null
@@ -1,416 +0,0 @@
-import ipaddress
-import os
-from time import sleep
-
-import dns.resolver
-import pytest
-from truenas_api_client import ValidationErrors as ClientValidationErrors
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.directory_service import (
-    active_directory, override_nameservers)
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.privilege import privilege
-from middlewared.test.integration.assets.product import product_type
-from middlewared.test.integration.utils import call, client, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.system import reset_systemd_svcs
-
-from auto_config import ha
-from protocols import smb_connection, smb_share
-from truenas_api_client import ClientException
-
-if ha and "hostname_virtual" in os.environ:
-    hostname = os.environ["hostname_virtual"]
-else:
-    from auto_config import hostname
-
-try:
-    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME
-    AD_USER = fr"AD02\{ADUSERNAME.lower()}"
-except ImportError:
-    Reason = 'ADNameServer, AD_DOMAIN, ADPASSWORD, and/or ADUSERNAME are missing in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-
-SMB_NAME = "TestADShare"
-
-
-def remove_dns_entries(payload):
-    call('dns.nsupdate', {'ops': payload})
-
-
-def cleanup_forward_zone():
-    try:
-        result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']})
-    except dns.resolver.NXDOMAIN:
-        # No entry, nothing to do
-        return
-
-    ips_to_remove = [rdata['address'] for rdata in result]
-
-    payload = []
-    for i in ips_to_remove:
-        addr = ipaddress.ip_address(i)
-        payload.append({
-            'command': 'DELETE',
-            'name': f'{hostname}.{AD_DOMAIN}.',
-            'address': str(addr),
-            'type': 'A' if addr.version == 4 else 'AAAA'
-        })
-
-    remove_dns_entries(payload)
-
-
-def check_ad_started():
-    ds = call('directoryservices.status')
-    if ds['type'] is None:
-        return False
-
-    assert ds['type'] == 'ACTIVEDIRECTORY'
-    assert ds['status'] == 'HEALTHY'
-    return True
-
-
-def cleanup_reverse_zone():
-    result = call('activedirectory.ipaddresses_to_register', {'hostname': f'{hostname}.{AD_DOMAIN}.', 'bindip': []}, False)
-    ptr_table = {f'{ipaddress.ip_address(i).reverse_pointer}.': i for i in result}
-
-    try:
-        result = call('dnsclient.reverse_lookup', {'addresses': list(ptr_table.values())})
-    except dns.resolver.NXDOMAIN:
-        # No entry, nothing to do
-        return
-
-    payload = []
-    for host in result:
-        reverse_pointer = host["name"]
-        assert reverse_pointer in ptr_table, str(ptr_table)
-        addr = ipaddress.ip_address(ptr_table[reverse_pointer])
-        payload.append({
-            'command': 'DELETE',
-            'name': host['target'],
-            'address': str(addr),
-            'type': 'A' if addr.version == 4 else 'AAAA'
-        })
-
-    remove_dns_entries(payload)
-
-
-@pytest.fixture(scope="function")
-def set_product_type(request):
-    if ha:
-        # HA product is already enterprise-licensed
-        yield
-    else:
-        with product_type():
-            yield
-
-
-@pytest.fixture(scope="function")
-def set_ad_nameserver(request):
-    with override_nameservers() as ns:
-        yield (request, ns)
-
-
-def test_cleanup_nameserver(set_ad_nameserver):
-    domain_info = call('activedirectory.domain_info', AD_DOMAIN)
-
-    cred = call('kerberos.get_cred', {'dstype': 'ACTIVEDIRECTORY',
-                                      'conf': {'bindname': ADUSERNAME,
-                                               'bindpw': ADPASSWORD,
-                                               'domainname': AD_DOMAIN
-                                               }
-                                      })
-
-    call('kerberos.do_kinit', {'krb5_cred': cred,
-                               'kinit-options': {'kdc_override': {'domain': AD_DOMAIN.upper(),
-                                                                  'kdc': domain_info['KDC server']
-                                                                  },
-                                                 }
-                               })
-
-    # Now that we have proper kinit as domain admin
-    # we can nuke stale DNS entries from orbit.
-    #
-    cleanup_forward_zone()
-    cleanup_reverse_zone()
-
-
-def test_enable_leave_activedirectory():
-    reset_systemd_svcs('winbind')
-    assert check_ad_started() is False
-
-    if not ha:
-        with pytest.raises(ValidationErrors):
-            # At this point we are not enterprise licensed
-            call("system.general.update", {"ds_auth": True})
-
-    short_name = None
-
-    with active_directory(dns_timeout=15) as ad:
-        short_name = ad['dc_info']['Pre-Win2k Domain']
-
-        # Make sure we can read our secrets.tdb file
-        secrets_has_domain = call('directoryservices.secrets.has_domain', short_name)
-        assert secrets_has_domain is True
-
-        # Check that our database has a backup of this info written to it.
-        db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$']
-        assert f'SECRETS/MACHINE_PASSWORD/{short_name}' in db_secrets
-
-        # Last password change should be populated
-        passwd_change = call('directoryservices.get_last_password_change')
-        assert passwd_change['dbconfig'] is not None
-        assert passwd_change['secrets'] is not None
-
-        # We should be able to change some parameters when joined to AD
-        call('activedirectory.update', {'domainname': AD_DOMAIN, 'verbose_logging': True}, job=True)
-
-        # Changing kerberos realm should raise ValidationError
-        with pytest.raises(ClientValidationErrors) as ve:
-            call('activedirectory.update', {'domainname': AD_DOMAIN, 'kerberos_realm': None}, job=True)
-
-        assert ve.value.errors[0].errmsg.startswith('Kerberos realm may not be altered')
-
-        # This should be caught by our catchall
-        with pytest.raises(ClientValidationErrors) as ve:
-            call('activedirectory.update', {'domainname': AD_DOMAIN, 'createcomputer': ''}, job=True)
-
-        assert ve.value.errors[0].errmsg.startswith('Parameter may not be changed')
-
-        assert check_ad_started() is True
-
-        # Verify that idmapping is working
-        pw = ad['user_obj']
-
-        # Verify winbindd information
-        assert pw['sid'] is not None, str(ad)
-        assert not pw['sid'].startswith('S-1-22-1-'), str(ad)
-        assert pw['local'] is False
-        assert pw['source'] == 'ACTIVEDIRECTORY'
-
-        result = call('dnsclient.forward_lookup', {'names': [f'{hostname}.{AD_DOMAIN}']})
-        assert len(result) != 0
-
-        addresses = [x['address'] for x in result]
-        assert truenas_server.ip in addresses
-
-        res = call('privilege.query', [['name', 'C=', AD_DOMAIN]], {'get': True})
-        assert res['ds_groups'][0]['name'].endswith('domain admins')
-        assert res['ds_groups'][0]['sid'].endswith('512')
-        assert res['allowlist'][0] == {'method': '*', 'resource': '*'}
-
-    assert check_ad_started() is False
-
-    secrets_has_domain = call('directoryservices.secrets.has_domain', short_name)
-    assert secrets_has_domain is False
-
-    with pytest.raises(KeyError):
-        call('user.get_user_obj', {'username': AD_USER})
-
-    result = call('privilege.query', [['name', 'C=', AD_DOMAIN]])
-    assert len(result) == 0, str(result)
-
-
-def test_activedirectory_smb_ops():
-    reset_systemd_svcs('winbind')
-    with active_directory(dns_timeout=15) as ad:
-        short_name = ad['dc_info']['Pre-Win2k Domain']
-        machine_password_key = f'SECRETS/MACHINE_PASSWORD/{short_name}'
-        running_pwd = call('directoryservices.secrets.dump')[machine_password_key]
-        db_pwd = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$'][machine_password_key]
-
-        # We've joined and left AD already. Verify that secrets are still being backed up correctly.
-        assert running_pwd == db_pwd
-
-        with dataset(
-            "ad_smb",
-            {'share_type': 'SMB'},
-            acl=[{
-                'tag': 'GROUP',
-                'id': ad['user_obj']['pw_uid'],
-                'perms': {'BASIC': 'FULL_CONTROL'},
-                'flags': {'BASIC': 'INHERIT'},
-                'type': 'ALLOW'
-            }]
-        ) as ds:
-            call('service.restart', 'cifs')
-
-            with smb_share(f'/mnt/{ds}', {'name': SMB_NAME}):
-                with smb_connection(
-                    host=truenas_server.ip,
-                    share=SMB_NAME,
-                    username=ADUSERNAME,
-                    domain='AD02',
-                    password=ADPASSWORD
-                ) as c:
-                    fd = c.create_file('testfile.txt', 'w')
-                    c.write(fd, b'foo')
-                    val = c.read(fd, 0, 3)
-                    c.close(fd, True)
-                    assert val == b'foo'
-
-                    c.mkdir('testdir')
-                    fd = c.create_file('testdir/testfile2.txt', 'w')
-                    c.write(fd, b'foo2')
-                    val = c.read(fd, 0, 4)
-                    c.close(fd, True)
-                    assert val == b'foo2'
-
-                    c.rmdir('testdir')
-
-        with dataset(
-            "ad_datasets",
-            {'share_type': 'SMB'},
-            acl=[{
-                'tag': 'GROUP',
-                'id': ad['user_obj']['pw_uid'],
-                'perms': {'BASIC': 'FULL_CONTROL'},
-                'flags': {'BASIC': 'INHERIT'},
-                'type': 'ALLOW'
-            }]
-        ) as ds:
-            with smb_share(f'/mnt/{ds}', {
-                'name': 'DATASETS',
-                'purpose': 'NO_PRESET',
-                'auxsmbconf': 'zfs_core:zfs_auto_create = true',
-                'path_suffix': '%D/%U'
-            }):
-                with smb_connection(
-                    host=truenas_server.ip,
-                    share='DATASETS',
-                    username=ADUSERNAME,
-                    domain='AD02',
-                    password=ADPASSWORD
-                ) as c:
-                    fd = c.create_file('nested_test_file', "w")
-                    c.write(fd, b'EXTERNAL_TEST')
-                    c.close(fd)
-
-            acl = call('filesystem.getacl', os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME), True)
-            assert acl['trivial'] is False, str(acl)
-
-        with dataset(
-            "ad_home",
-            {'share_type': 'SMB'},
-            acl=[{
-                'tag': 'GROUP',
-                'id': ad['user_obj']['pw_uid'],
-                'perms': {'BASIC': 'FULL_CONTROL'},
-                'flags': {'BASIC': 'INHERIT'},
-                'type': 'ALLOW'
-            }]
-        ) as ds:
-
-            with smb_share(f'/mnt/{ds}', {
-                'name': 'TEST_HOME',
-                'purpose': 'NO_PRESET',
-                'home': True,
-            }):
-                # must refresh idmap cache to get new homedir from NSS
-                # this means we may need a few seconds for winbindd
-                # service to settle down on slow systems (like our CI VMs)
-                sleep(10 if ha else 5)
-
-                with smb_connection(
-                    host=truenas_server.ip,
-                    share='HOMES',
-                    username=ADUSERNAME,
-                    domain='AD02',
-                    password=ADPASSWORD
-                ) as c:
-                    fd = c.create_file('homes_test_file', "w")
-                    c.write(fd, b'EXTERNAL_TEST')
-                    c.close(fd)
-
-            file_local_path = os.path.join(f'/mnt/{ds}', 'AD02', ADUSERNAME, 'homes_test_file')
-            acl = call('filesystem.getacl', file_local_path, True)
-            assert acl['trivial'] is False, str(acl)
-
-
-def test_account_privilege_authentication(set_product_type):
-    reset_systemd_svcs('winbind smbd')
-
-    with active_directory(dns_timeout=15):
-        call("system.general.update", {"ds_auth": True})
-        nusers = call("user.query", [["local", "=", False]], {"count": True})
-        assert nusers > 0
-        ngroups = call("group.query", [["local", "=", False]], {"count": True})
-        assert ngroups > 0
-        try:
-            # RID 513 is constant for "Domain Users"
-            domain_sid = call("idmap.domain_info", AD_DOMAIN.split(".")[0])['sid']
-            with privilege({
-                "name": "AD privilege",
-                "local_groups": [],
-                "ds_groups": [f"{domain_sid}-513"],
-                "allowlist": [
-                    {"method": "CALL", "resource": "system.info"},
-                    {"method": "CALL", "resource": "user.query"},
-                    {"method": "CALL", "resource": "group.query"},
-                ],
-                "web_shell": False,
-            }):
-                with client(auth=(f"limiteduser@{AD_DOMAIN}", ADPASSWORD)) as c:
-                    methods = c.call("core.get_methods")
-                    me = c.call("auth.me")
-
-                    assert 'DIRECTORY_SERVICE' in me['account_attributes']
-                    assert 'ACTIVE_DIRECTORY' in me['account_attributes']
-
-                    assert len(c.call("user.query", [["local", "=", False]])) == nusers
-                    assert len(c.call("group.query", [["local", "=", False]])) == ngroups
-
-                assert "system.info" in methods
-                assert "pool.create" not in methods
-
-                # ADUSERNAME is member of domain admins and will have
-                # all privileges
-                with client(auth=(f"{ADUSERNAME}@{AD_DOMAIN}", ADPASSWORD)) as c:
-                    methods = c.call("core.get_methods")
-
-                assert "pool.create" in methods
-
-                # Alternative formatting for user name <DOMAIN>\<username>.
-                # this should also work for auth
-                with client(auth=(AD_USER, ADPASSWORD)) as c:
-                    methods = c.call("core.get_methods")
-
-                assert "pool.create" in methods
-
-        finally:
-            call("system.general.update", {"ds_auth": False})
-
-
-def test_secrets_restore():
-
-    with active_directory():
-        reset_systemd_svcs('winbind smbd')
-        assert check_ad_started() is True
-
-        ssh('rm /var/db/system/samba4/private/secrets.tdb')
-
-        with pytest.raises(ClientException):
-            call('directoryservices.health.check')
-
-        call('directoryservices.health.recover')
-
-        assert check_ad_started() is True
-
-
-def test_keytab_restore():
-
-    with active_directory():
-        reset_systemd_svcs('winbind smbd')
-        assert check_ad_started() is True
-
-        kt_id = call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})['id']
-
-        # delete our keytab from datastore
-        call('datastore.delete', 'directoryservice.kerberoskeytab', kt_id)
-
-        call('directoryservices.health.recover')
-
-        # verify that it was recreated during health check
-        call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})
diff --git a/tests/api2/test_032_ad_kerberos.py b/tests/api2/test_032_ad_kerberos.py
deleted file mode 100644
index 9b1e889fd52ab..0000000000000
--- a/tests/api2/test_032_ad_kerberos.py
+++ /dev/null
@@ -1,355 +0,0 @@
-import os
-import sys
-
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import SSH_TEST
-from auto_config import hostname, password, user
-from contextlib import contextmanager
-from base64 import b64decode
-from protocols import nfs_share
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.directory_service import active_directory
-
-try:
-    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, AD_COMPUTER_OU
-except ImportError:
-    pytestmark = pytest.mark.skip(reason='Missing AD configuration')
-
-SAMPLE_KEYTAB = "BQIAAABTAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABHAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABTAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAABHAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAABbAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAE8AAgALSE9NRURPTS5GVU4AEXJlc3RyaWN0ZWRrcmJob3N0AAZURVNUNDkAAAABXyQSugEAEQAQENA4f60pgK6h51DndZYoWAAAAAEAAAAAAAAAawACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAAXwACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QABlRFU1Q0OQAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAAWwACAAtIT01FRE9NLkZVTgARcmVzdHJpY3RlZGtyYmhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQAXABAcyjciCUnM9DmiyiPO4VIaAAAAAQAAAAAAAABPAAIAC0hPTUVET00uRlVOABFyZXN0cmljdGVka3JiaG9zdAAGVEVTVDQ5AAAAAV8kEroBABcAEBzKNyIJScz0OaLKI87hUhoAAAABAAAAAAAAAEYAAgALSE9NRURPTS5GVU4ABGhvc3QAEnRlc3Q0OS5ob21lZG9tLmZ1bgAAAAFfJBK6AQABAAgxzdyr/ViiywAAAAEAAAAAAAAAOgACAAtIT01FRE9NLkZVTgAEaG9zdAAGVEVTVDQ5AAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAABGAAIAC0hPTUVET00uRlVOAARob3N0ABJ0ZXN0NDkuaG9tZWRvbS5mdW4AAAABXyQSugEAAwAIMc3cq/1YossAAAABAAAAAAAAADoAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQADAAgxzdyr/ViiywAAAAEAAAAAAAAATgACAAtIT01FRE9NLkZVTgAEaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAEIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQARABAQ0Dh/rSmArqHnUOd1lihYAAAAAQAAAAAAAABeAAIAC0hPTUVET00uRlVOAARob3N0ABJ0ZXN0NDkuaG9tZWRvbS5mdW4AAAABXyQSugEAEgAgimU40564099I3agBtkEW/3Bck3vZM3y8IQQJuUJ7jIkAAAABAAAAAAAAAFIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQASACCKZTjTnrjT30jdqAG2QRb/cFyTe9kzfLwhBAm5QnuMiQAAAAEAAAAAAAAATgACAAtIT01FRE9NLkZVTgAEaG9zdAASdGVzdDQ5LmhvbWVkb20uZnVuAAAAAV8kEroBABcAEBzKNyIJScz0OaLKI87hUhoAAAABAAAAAAAAAEIAAgALSE9NRURPTS5GVU4ABGhvc3QABlRFU1Q0OQAAAAFfJBK6AQAXABAcyjciCUnM9DmiyiPO4VIaAAAAAQAAAAAAAAA1AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBAAEACDHN3Kv9WKLLAAAAAQAAAAAAAAA1AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBAAMACDHN3Kv9WKLLAAAAAQAAAAAAAAA9AAEAC0hPTUVET00uRlVOAAdURVNUNDkkAAAAAV8kEroBABEAEBDQOH+tKYCuoedQ53WWKFgAAAABAAAAAAAAAE0AAQALSE9NRURPTS5GVU4AB1RFU1Q0OSQAAAABXyQSugEAEgAgimU40564099I3agBtkEW/3Bck3vZM3y8IQQJuUJ7jIkAAAABAAAAAAAAAD0AAQALSE9NRURPTS5GVU4AB1RFU1Q0OSQAAAABXyQSugEAFwAQHMo3IglJzPQ5osojzuFSGgAAAAEAAAAA"  # noqa
-
-SAMPLEDOM_NAME = "CANARY.FUN"
-SAMPLEDOM_REALM = {
-    "realm": SAMPLEDOM_NAME,
-    "kdc": ["169.254.100.1", "169.254.100.2", "169.254.100.3"],
-    "admin_server": ["169.254.100.10", "169.254.100.11", "169.254.100.12"],
-    "kpasswd_server": ["169.254.100.20", "169.254.100.21", "169.254.100.22"],
-}
-
-
-APPDEFAULTS_PAM_OVERRIDE = """
-pam = {
-    forwardable = false
-    ticket_lifetime = 36000
-}
-"""
-
-
-def get_export_sec(exports_config):
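-    # Scan the tab-indented export option lines and return the first option
-    # inside the parentheses (e.g. "sec=sys:krb5:krb5i:krb5p").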
-    sec_entry = None
-    for entry in exports_config.splitlines():
-        if not entry.startswith("\t"):
-            continue
-
-        line = entry.strip().split("(")[1]
-        sec_entry = line.split(",")[0]
-        break
-
-    return sec_entry
-
-
-def regenerate_exports():
-    # NFS service isn't running for these tests
-    # and so exports aren't updated. Force the update.
-    call('etc.generate', 'nfsd')
-
-
-def check_export_sec(expected):
-    regenerate_exports()
-    results = SSH_TEST('cat /etc/exports', user, password)
-    assert results['result'] is True, results['stderr']
-    exports_config = results['stdout'].strip()
-    sec = get_export_sec(exports_config)
-    assert sec == expected, exports_config
-
-
-def parse_krb5_conf(fn, split=None, state=None):
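-    # Read /etc/krb5.conf and feed it to a caller-supplied parser: `fn` is
-    # invoked as fn(lines, idx, entry, state) for each line (or each section
-    # when `split` is given) and records its findings in `state`.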
-    results = SSH_TEST('cat /etc/krb5.conf', user, password)
-    assert results['result'] is True, results['output']
-
-    if split:
-        krb5conf_lines = results['stdout'].split(split)
-    else:
-        krb5conf_lines = results['stdout'].splitlines()
-
-    for idx, entry in enumerate(krb5conf_lines):
-        fn(krb5conf_lines, idx, entry, state)
-
-    return results['output']
-
-
-@contextmanager
-def add_kerberos_keytab(ktname):
-    kt = call('kerberos.keytab.create', {
-        "name": ktname,
-        "file": SAMPLE_KEYTAB
-    })
-    try:
-        yield kt
-    finally:
-        call('kerberos.keytab.delete', kt['id'])
-
-
-@contextmanager
-def add_kerberos_realm(realm_name):
-    realm = call('kerberos.realm.create', {
-        'realm': realm_name,
-    })
-    try:
-        yield realm
-    finally:
-        call('kerberos.realm.delete', realm['id'])
-
-
-@pytest.fixture(scope="function")
-def do_ad_connection(request):
-    with active_directory(
-        AD_DOMAIN,
-        ADUSERNAME,
-        ADPASSWORD,
-        netbiosname=hostname,
-        createcomputer=AD_COMPUTER_OU,
-    ) as ad:
-        yield (request, ad)
-
-
-def test_kerberos_keytab_and_realm(do_ad_connection):
-
-    def krb5conf_parser(krb5conf_lines, idx, entry, state):
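-        # Realm options are rendered as consecutive "key = value" lines in
-        # krb5.conf, so once the first kdc/admin_server/kpasswd_server entry is
-        # found, the next two lines are checked for the remaining addresses.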
-        if entry.lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][0]}":
-            assert krb5conf_lines[idx + 1].lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][1]}"
-            assert krb5conf_lines[idx + 2].lstrip() == f"kdc = {SAMPLEDOM_REALM['kdc'][2]}"
-            state['has_kdc'] = True
-
-        if entry.lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][0]}":
-            assert krb5conf_lines[idx + 1].lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][1]}"
-            assert krb5conf_lines[idx + 2].lstrip() == f"admin_server = {SAMPLEDOM_REALM['admin_server'][2]}"
-            state['has_admin_server'] = True
-
-        if entry.lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][0]}":
-            assert krb5conf_lines[idx + 1].lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][1]}"
-            assert krb5conf_lines[idx + 2].lstrip() == f"kpasswd_server = {SAMPLEDOM_REALM['kpasswd_server'][2]}"
-            state['has_kpasswd_server'] = True
-
-    assert call('directoryservices.status')['status'] == 'HEALTHY'
-    """
-    The keytab in this case is a b64encoded keytab file.
-    AD_MACHINE_ACCOUNT is automatically generated during domain
-    join and uploaded into our configuration database. This
-    test checks for its presence and that it's validly b64 encoded.
-    The process of decoding and adding to system keytab is tested
-    in later kerberos tests. "kerberos.start" will decode, write
-    to system keytab, and kinit. So in this case, proper function
-    can be determined by printing contents of system keytab and
-    verifying that we were able to get a kerberos ticket.
-    """
-    kt = call('kerberos.keytab.query', [['name', '=', 'AD_MACHINE_ACCOUNT']], {'get': True})
-    b64decode(kt['file'])
-
-    """
-    kerberos_principal_choices lists unique keytab principals in
-    the system keytab. AD_MACHINE_ACCOUNT should add more than
-    one principal.
-    """
-    orig_kt = call('kerberos.keytab.kerberos_principal_choices')
-    assert orig_kt != []
-
-    """
-    kerberos.check_ticket performs a platform-independent verification
-    of the kerberos ticket.
-    """
-    call('kerberos.check_ticket')
-
-    """
-    Test uploading the b64encoded sample kerberos keytab included
-    at the top of this file. In the next series of tests we will
-    upload, validate that it was uploaded, and verify that the
-    keytab is read back correctly.
-    """
-    with add_kerberos_keytab('KT2'):
-        kt2 = call('kerberos.keytab.query', [['name', '=', 'KT2']], {'get': True})
-        b64decode(kt2['file'])
-        assert kt2['file'] == SAMPLE_KEYTAB
-
-    """
-    AD Join should automatically add a kerberos realm
-    for the AD domain.
-    """
-    call('kerberos.realm.query', [['realm', '=', AD_DOMAIN.upper()]], {'get': True})
-
-    with add_kerberos_realm(SAMPLEDOM_NAME) as new_realm:
-        payload = SAMPLEDOM_REALM.copy()
-        payload.pop("realm")
-        call('kerberos.realm.update', new_realm['id'], payload)
-
-        r = call('kerberos.realm.query', [['realm', '=', SAMPLEDOM_NAME]], {'get': True})
-        r.pop('id')
-        assert r == SAMPLEDOM_REALM
-
-        # Verify realms properly added to krb5.conf
-        iter_state = {
-            'has_kdc': False,
-            'has_admin_server': False,
-            'has_kpasswd_server': False
-        }
-        output = parse_krb5_conf(krb5conf_parser, state=iter_state)
-
-        assert iter_state['has_kdc'] is True, output
-        assert iter_state['has_admin_server'] is True, output
-        assert iter_state['has_kpasswd_server'] is True, output
-
-    assert len(call('kerberos.realm.query', [['realm', '=', SAMPLEDOM_NAME]])) == 0
-
-
-def test_kerberos_krbconf(do_ad_connection):
-    def parser_1(unused, idx, sec, state):
-        if not sec.startswith("appdefaults"):
-            return
-
-        for entry in sec.splitlines():
-            if entry.lstrip().startswith('}'):
-                break
-
-            if entry.strip() == "forwardable = false":
-                state['has_forwardable'] = True
-
-            if entry.strip() == "ticket_lifetime = 36000":
-                state['has_ticket_lifetime'] = True
-
-    def parse_section(unused, idx, sec, state):
-        if not sec.startswith(state['section']):
-            return
-
-        for entry in sec.splitlines():
-            if entry.strip() == state['to_check']:
-                state['found'] = True
-                break
-
-    """
-    Test of more complex auxiliary parameter parsing that allows
-    users to override our defaults.
-    """
-
-    call('kerberos.update', {'appdefaults_aux': APPDEFAULTS_PAM_OVERRIDE})
-
-    iter_state = {
-        'has_forwardable': False,
-        'has_ticket_lifetime': False
-    }
-
-    output = parse_krb5_conf(parser_1, split='[', state=iter_state)
-
-    assert iter_state['has_forwardable'] is True, output
-    assert iter_state['has_ticket_lifetime'] is True, output
-
-    call('kerberos.update', {'appdefaults_aux': 'encrypt = true'})
-
-    iter_state = {
-        'section': 'appdefaults',
-        'found': False,
-        'to_check': 'encrypt = true'
-    }
-
-    output = parse_krb5_conf(parse_section, split='[', state=iter_state)
-    assert iter_state['found'] is True, output
-
-    call('kerberos.update', {'libdefaults_aux': 'rdns = true'})
-
-    iter_state = {
-        'section': 'libdefaults',
-        'found': False,
-        'to_check': 'rdns = true'
-    }
-    output = parse_krb5_conf(parse_section, split='[', state=iter_state)
-    assert iter_state['found'] is True, output
-
-
-def test_invalid_aux():
-    call('kerberos.update', {'appdefaults_aux': '', 'libdefaults_aux': ''})
-
-    # check that parser raises validation errors
-    with pytest.raises(ValidationErrors):
-        call('kerberos.update', {'appdefaults_aux': 'canary = true'})
-
-    with pytest.raises(ValidationErrors):
-        call('kerberos.update', {'libdefaults_aux': 'canary = true'})
-
-
-def test_kerberos_nfs4(do_ad_connection):
-    assert call('kerberos.keytab.has_nfs_principal') is True
-
-    with dataset('AD_NFS') as ds:
-        with nfs_share(f'/mnt/{ds}', options={'comment': 'KRB Test Share'}):
-            call('nfs.update', {"protocols": ["NFSV3", "NFSV4"]})
-
-            """
-            First NFS exports check. In this situation we are joined to
-            AD and therefore have a keytab with an NFS entry.
-
-            Expected security is:
-            "V4: / -sec=sys:krb5:krb5i:krb5p"
-            """
-            check_export_sec('sec=sys:krb5:krb5i:krb5p')
-
-            call('nfs.update', {"v4_krb": True})
-
-            """
-            Second NFS exports check. We have now enabled v4_krb.
-            Expected security is:
-            "V4: / -sec=krb5:krb5i:krb5p"
-            """
-            check_export_sec('sec=krb5:krb5i:krb5p')
-
-            """
-            v4_krb_enabled should still be True after disabling
-            v4_krb because we still have an NFS service principal
-            in our keytab.
-            """
-            data = call('nfs.update', {'v4_krb': False})
-            assert data['v4_krb_enabled'] is True, str(data)
-
-            """
-            Third NFS exports check. We now have an NFS SPN entry
-            but v4_krb is disabled.
-            Expected security is:
-            "V4: / -sec=sys:krb5:krb5i:krb5p"
-            """
-            check_export_sec('sec=sys:krb5:krb5i:krb5p')
-
-
-def test_verify_nfs_krb_disabled():
-    """
-    This test checks that we are no longer flagged as having
-    v4_krb_enabled now that we are not joined to AD.
-    """
-    assert call('nfs.config')['v4_krb_enabled'] is False
-
-
-def test_kerberos_ticket_management(do_ad_connection):
-    klist_out = call('kerberos.klist')
-    assert klist_out['default_principal'].startswith(hostname.upper()), str(klist_out)
-    assert klist_out['ticket_cache']['type'] == 'KEYRING'
-    assert klist_out['ticket_cache']['name'].startswith('persistent:0')
-    assert len(klist_out['tickets']) != 0
-
-    to_check = None
-    for tkt in klist_out['tickets']:
-        if tkt['server'].startswith('krbtgt'):
-            to_check = tkt
-
-    assert to_check is not None, str(klist_out)
-    assert 'RENEWABLE' in to_check['flags']
-
-    call('core.get_jobs', [
-        ['method', '=', 'kerberos.wait_for_renewal'],
-        ['state', '=', 'RUNNING']
-    ], {'get': True})
-
-
-def test_check_ad_machine_account_deleted_after_ad_leave():
-    assert len(call('kerberos.keytab.query')) == 0
diff --git a/tests/api2/test_035_ad_idmap.py b/tests/api2/test_035_ad_idmap.py
deleted file mode 100644
index cc571861cd218..0000000000000
--- a/tests/api2/test_035_ad_idmap.py
+++ /dev/null
@@ -1,375 +0,0 @@
-#!/usr/bin/env python3
-
-# Author: Eric Turgeon
-# License: BSD
-# Location for tests of the FreeNAS REST API
-
-import pytest
-import sys
-import os
-import json
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from auto_config import hostname
-from base64 import b64decode
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.directory_service import active_directory
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.system import reset_systemd_svcs
-from time import sleep
-
-try:
-    from config import AD_DOMAIN, ADPASSWORD, ADUSERNAME, AD_COMPUTER_OU
-    from config import (
-        LDAPBASEDN,
-        LDAPBINDDN,
-        LDAPBINDPASSWORD,
-        LDAPHOSTNAME
-    )
-except ImportError:
-    Reason = 'AD_DOMAIN, ADPASSWORD, and/or ADUSERNAME are missing in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-BACKENDS = [
-    "AD",
-    "AUTORID",
-    "LDAP",
-    "NSS",
-    "RFC2307",
-    "TDB",
-    "RID",
-]
-
-
-@pytest.fixture(scope="function")
-def idmap_domain():
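-    # Create a throw-away idmap domain using the next available (low, high)
-    # range reported by idmap.get_next_idmap_range.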
-    low, high = call('idmap.get_next_idmap_range')
-    payload = {
-        "name": "canary",
-        "range_low": low,
-        "range_high": high,
-        "idmap_backend": "RID",
-        "options": {},
-    }
-    new_idmap = call('idmap.create', payload)
-
-    try:
-        yield new_idmap
-    finally:
-        call('idmap.delete', new_idmap['id'])
-
-
-@pytest.fixture(scope="module")
-def do_ad_connection(request):
-    call('service.update', 'cifs', {'enable': True})
-    try:
-        with active_directory(
-            AD_DOMAIN,
-            ADUSERNAME,
-            ADPASSWORD,
-            netbiosname=hostname,
-            createcomputer=AD_COMPUTER_OU,
-        ) as ad:
-            yield ad
-    finally:
-        call('service.update', 'cifs', {'enable': False})
-
-
-def assert_ad_healthy():
-    assert call('directoryservices.status')['type'] == 'ACTIVEDIRECTORY'
-    call('directoryservices.health.check')
-
-
-@pytest.fixture(scope="module")
-def backend_data():
-    backend_options = call('idmap.backend_options')
-    workgroup = call('smb.config')['workgroup']
-    yield {'options': backend_options, 'workgroup': workgroup}
-
-
-def test_name_sid_resolution(do_ad_connection):
-
-    # get list of AD group gids for user from NSS
-    ad_acct = call('user.get_user_obj', {'username': f'{ADUSERNAME}@{AD_DOMAIN}', 'get_groups': True})
-    groups = set(ad_acct['grouplist'])
-
-    # convert list of gids into sids
-    sids = call('idmap.convert_unixids', [{'id_type': 'GROUP', 'id': x} for x in groups])
-    sidlist = set([x['sid'] for x in sids['mapped'].values()])
-    assert len(groups) == len(sidlist)
-
-    # convert sids back into unixids
-    unixids = call('idmap.convert_sids', list(sidlist))
-    assert set([x['id'] for x in unixids['mapped'].values()]) == groups
-
-
-@pytest.mark.parametrize('backend', BACKENDS)
-def test_backend_options(do_ad_connection, backend_data, backend):
-    """
-    Tests for backend options are performed against
-    the backend for the domain we're joined to
-    (DS_TYPE_ACTIVEDIRECTORY) so that auto-detection
-    works correctly. The three default idmap domains,
-    DS_TYPE_ACTIVEDIRECTORY, DS_TYPE_LDAP, and
-    DS_TYPE_DEFAULT_DOMAIN, have hard-coded ids and
-    so we don't need to look them up.
-    """
-    reset_systemd_svcs('winbind smbd')
-    opts = backend_data['options'][backend]['parameters'].copy()
-    WORKGROUP = backend_data['workgroup']
-    set_secret = False
-
-    payload = {
-        "name": "DS_TYPE_ACTIVEDIRECTORY",
-        "range_low": "1000000001",
-        "range_high": "2000000000",
-        "idmap_backend": backend,
-        "options": {}
-    }
-    payload3 = {"options": {}}
-    for k, v in opts.items():
-        """
-        Populate garbage data where an opt is required.
-        This should get us past the first step of
-        switching to the backend before doing more
-        comprehensive tests.
-        """
-        if v['required']:
-            payload["options"].update({k: "canary"})
-
-    if backend == 'RFC2307':
-        payload['options'].update({"ldap_server": "STANDALONE"})
-
-    if not payload['options']:
-        payload.pop('options')
-
-    call('idmap.update', 1, payload)
-
-    # We unfortunately need to sleep here on each iteration to allow time for
-    # winbind to settle down before applying more idmap changes otherwise
-    # subsequent idmap.update call will time out.
-    sleep(5)
-
-    if backend == "AUTORID":
-        IDMAP_CFG = "idmap config * "
-    else:
-        IDMAP_CFG = f"idmap config {WORKGROUP} "
-
-    """
-    Validate that backend was correctly set in smb.conf.
-    """
-    running_backend = call('smb.getparm', f'{IDMAP_CFG}: backend', 'GLOBAL')
-    assert running_backend == backend.lower()
-
-    if backend == "RID":
-        """
-        sssd_compat generates a lower range based
-        on a murmur3 hash of the domain SID. Since we're validating
-        basic functionality, checking that our range_low
-        changed is sufficient for now.
-        """
-        payload2 = {"options": {"sssd_compat": True}}
-        out = call('idmap.update', 1, payload2)
-        assert out['range_low'] != payload['range_low']
-
-    elif backend == "AUTORID":
-        """
-        autorid is unique among the idmap backends because
-        its configuration replaces the default idmap backend
-        "idmap config *".
-        """
-        payload3["options"] = {
-            "rangesize": 200000,
-            "readonly": True,
-            "ignore_builtin": True,
-        }
-        call('idmap.update', 1, payload3)
-
-    elif backend == "AD":
-        payload3["options"] = {
-            "schema_mode": "SFU",
-            "unix_primary_group": True,
-            "unix_nss_info": True,
-        }
-        call('idmap.update', 1, payload3)
-
-    elif backend == "LDAP":
-        payload3["options"] = {
-            "ldap_base_dn": LDAPBASEDN,
-            "ldap_user_dn": LDAPBINDDN,
-            "ldap_url": LDAPHOSTNAME,
-            "ldap_user_dn_password": LDAPBINDPASSWORD,
-            "ssl": "ON",
-            "readonly": True,
-        }
-        call('idmap.update', 1, payload3)
-        secret = payload3["options"].pop("ldap_user_dn_password")
-        set_secret = True
-
-    elif backend == "RFC2307":
-        payload3["options"] = {
-            "ldap_server": "STANDALONE",
-            "bind_path_user": LDAPBASEDN,
-            "bind_path_group": LDAPBASEDN,
-            "user_cn": True,
-            "ldap_domain": "",
-            "ldap_url": LDAPHOSTNAME,
-            "ldap_user_dn": LDAPBINDDN,
-            "ldap_user_dn_password": LDAPBINDPASSWORD,
-            "ssl": "ON",
-            "ldap_realm": True,
-        }
-        call('idmap.update', 1, payload3)
-        r = payload3["options"].pop("ldap_realm")
-        payload3["options"]["realm"] = r
-        secret = payload3["options"].pop("ldap_user_dn_password")
-        set_secret = True
-
-    for k, v in payload3['options'].items():
-        """
-        At this point we should have added every supported option
-        for the current backend. Iterate through each option and verify
-        that it was written to samba's running configuration.
-        """
-        if k in ['realm', 'ssl']:
-            continue
-
-        res = call('smb.getparm', f'{IDMAP_CFG}: {k}', 'GLOBAL')
-        assert res is not None, f'Failed to retrieve `{IDMAP_CFG}: {k}` from running configuration'
-
-        if k == 'ldap_url':
-            v = f'ldaps://{v}'
-        elif k == 'ldap_domain':
-            v = None
-
-        if v == 'STANDALONE':
-            v = 'stand-alone'
-
-        try:
-            res = json.loads(res)
-            assert res == v, f"{backend} - [{k}]: {res}"
-        except json.decoder.JSONDecodeError:
-            if isinstance(v, bool):
-                v = str(v)
-
-            if v is None:
-                assert res in (None, ''), f"{backend} - [{k}]: {res}"
-            else:
-                assert v.casefold() == res.casefold(), f"{backend} - [{k}]: {res}"
-
-    if set_secret:
-        """
-        API calls that set an idmap secret should result in the
-        secret being written to secrets.tdb in Samba's private
-        directory. To check this, force a secrets db dump, check
-        for keys, then decode secret.
-        """
-        idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN)
-        db_secrets = call('directoryservices.secrets.get_db_secrets')[f'{hostname.upper()}$']
-
-        # Check that our secret is written and stored in secrets backup correctly
-        assert idmap_secret == db_secrets[f"SECRETS/GENERIC/IDMAP_LDAP_{WORKGROUP}/{LDAPBINDDN}"]
-        decoded_sec = b64decode(idmap_secret).rstrip(b'\x00').decode()
-        assert secret == decoded_sec, idmap_secret
-
-        # Use net command via samba to rewrite secret and make sure it is same
-        ssh(f"net idmap set secret {WORKGROUP} '{secret}'")
-        new_idmap_secret = call('directoryservices.secrets.get_ldap_idmap_secret', WORKGROUP, LDAPBINDDN)
-        assert idmap_secret == new_idmap_secret
-
-        secrets_dump = call('directoryservices.secrets.dump')
-        assert secrets_dump == db_secrets
-
-    # reset idmap backend to RID to ensure that winbindd is running
-    reset_systemd_svcs('winbind smbd')
-
-    payload = {
-        "name": "DS_TYPE_ACTIVEDIRECTORY",
-        "range_low": "1000000001",
-        "range_high": "2000000000",
-        "idmap_backend": 'RID',
-        "options": {}
-    }
-    call('idmap.update', 1, payload)
-
-
-def test_clear_idmap_cache(do_ad_connection):
-    call('idmap.clear_idmap_cache', job=True)
-
-
-def test_idmap_overlap_fail(do_ad_connection):
-    """
-    It should not be possible to set an idmap range for a new
-    domain that overlaps an existing one.
-    """
-    assert_ad_healthy()
-    payload = {
-        "name": "canary",
-        "range_low": "20000",
-        "range_high": "2000000000",
-        "idmap_backend": "RID",
-        "options": {}
-    }
-    with pytest.raises(ValidationErrors):
-        call('idmap.create', payload)
-
-
-def test_idmap_default_domain_name_change_fail():
-    """
-    It should not be possible to change the name of a
-    default idmap domain.
-    """
-    assert_ad_healthy()
-    payload = {
-        "name": "canary",
-        "range_low": "1000000000",
-        "range_high": "2000000000",
-        "idmap_backend": "RID",
-        "options": {}
-    }
-    with pytest.raises(ValidationErrors):
-        call('idmap.create', payload)
-
-
-def test_idmap_low_high_range_inversion_fail(request):
-    """
-    It should not be possible to set an idmap low range
-    that is greater than its high range.
-    """
-    assert_ad_healthy()
-    payload = {
-        "name": "canary",
-        "range_low": "2000000000",
-        "range_high": "1900000000",
-        "idmap_backend": "RID",
-    }
-    with pytest.raises(ValidationErrors):
-        call('idmap.create', payload)
-
-
-def test_idmap_new_domain_duplicate_fail(idmap_domain):
-    """
-    It should not be possible to create a new domain that
-    has a name conflict with an existing one.
-    """
-    low, high = call('idmap.get_next_idmap_range')
-    payload = {
-        "name": idmap_domain["name"],
-        "range_low": low,
-        "range_high": high,
-        "idmap_backend": "RID",
-    }
-    with pytest.raises(ValidationErrors):
-        call('idmap.create', payload)
-
-
-def test_idmap_new_domain_autorid_fail(idmap_domain):
-    """
-    It should only be possible to set AUTORID on
-    default domain.
-    """
-    payload = {
-        "idmap_backend": "AUTORID",
-    }
-    with pytest.raises(ValidationErrors):
-        call('idmap.update', idmap_domain['id'], payload)
diff --git a/tests/api2/test_040_ad_user_group_cache.py b/tests/api2/test_040_ad_user_group_cache.py
deleted file mode 100644
index 00dbcb17c8f54..0000000000000
--- a/tests/api2/test_040_ad_user_group_cache.py
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/usr/bin/env python3
-
-import errno
-import pytest
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import SSH_TEST
-from auto_config import password, user
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.directory_service import active_directory
-from middlewared.test.integration.utils import call
-
-
-WINBIND_SEPARATOR = "\\"
-
-
-@pytest.fixture(scope="module")
-def do_ad_connection(request):
-    with active_directory() as ad:
-        # make extra sure the cache fill has completed
-        cache_fill_job = call(
-            'core.get_jobs',
-            [['method', '=', 'directoryservices.cache.refresh_impl']],
-            {'order_by': ['-id'], 'get': True}
-        )
-        if cache_fill_job['state'] == 'RUNNING':
-            call('core.job_wait', cache_fill_job['id'], job=True)
-
-        users = [x['username'] for x in call(
-            'user.query', [['local', '=', False]],
-        )]
-
-        set_users = set(users)
-        assert len(set_users) == len(users)
-
-        groups = [x['name'] for x in call(
-            'group.query', [['local', '=', False]],
-        )]
-
-        set_groups = set(groups)
-        assert len(set_groups) == len(groups)
-
-        yield ad | {'users': set_users, 'groups': set_groups}
-
-
-def get_ad_user_and_group(ad_connection):
-    WORKGROUP = ad_connection['dc_info']['Pre-Win2k Domain']
-
-    domain_prefix = f'{WORKGROUP.upper()}{WINBIND_SEPARATOR}'
-    ad_user = ad_connection['user_obj']['pw_name']
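-    # Cached AD group names include the pre-Win2k (NetBIOS) domain prefix joined
-    # with the winbind separator, e.g. DOM\domain users.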
-    ad_group = f'{domain_prefix}domain users'
-
-    user = call(
-        'user.query', [['username', '=', ad_user]],
-        {'get': True}
-    )
-
-    group = call(
-        'group.query', [['name', '=', ad_group]],
-        {'get': True}
-    )
-
-    return (user, group)
-
-
-def test_check_for_ad_users(do_ad_connection):
-    """
-    This test validates that wbinfo -u output matches entries
-    we get through user.query
-    """
-    cmd = "wbinfo -u"
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'], str(results['output'])
-    wbinfo_entries = set(results['stdout'].splitlines())
-
-    assert wbinfo_entries == do_ad_connection['users']
-
-
-def test_check_for_ad_groups(do_ad_connection):
-    """
-    This test validates that wbinfo -g output matches entries
-    we get through group.query
-    """
-    cmd = "wbinfo -g"
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'], str(results['output'])
-    wbinfo_entries = set(results['stdout'].splitlines())
-
-    assert wbinfo_entries == do_ad_connection['groups']
-
-
-def test_check_directoryservices_cache_refresh(do_ad_connection):
-    """
-    This test validates that middleware can successfully rebuild the
-    directory services cache from scratch using the public API.
-
-    This currently happens once per 24 hours. Result of failure here will
-    be lack of users/groups visible in webui.
-    """
-
-    # Cache resides in tdb files. Remove the files to clear cache.
-    cmd = 'rm -f /root/tdb/persistent/*'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-
-    # The directoryservices.cache.refresh_impl job rebuilds / refreshes LDAP / AD users.
-    call('directoryservices.cache.refresh_impl', job=True)
-
-    users = set([x['username'] for x in call(
-        'user.query', [['local', '=', False]]
-    )])
-
-    assert users == do_ad_connection['users']
-
-    groups = set([x['name'] for x in call(
-        'group.query', [['local', '=', False]],
-    )])
-
-    assert groups == do_ad_connection['groups']
-
-
-def test_check_lazy_initialization_of_users_and_groups_by_name(do_ad_connection):
-    """
-    When users explicitly search for a directory service or other user
-    by name or id we should hit pwd and grp modules and synthesize a
-    result if the user / group is not in the cache. This special behavior
-    only occurs with a single filter of "name =" or "id =". So after the
-    initial query, which should result in insertion, we add a second filter
-    to only hit the cache. Code paths are slightly different for lookups
-    by id or by name and so they are tested separately.
-    """
-
-    cmd = 'rm -f /root/tdb/persistent/*'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-
-    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
-
-    assert ad_user['id_type_both'] is True
-    assert ad_user['immutable'] is True
-    assert ad_user['local'] is False
-    assert ad_group['id_type_both'] is True
-    assert ad_group['local'] is False
-
-    cache_names = set([x['username'] for x in call(
-        'user.query', [['local', '=', False]],
-    )])
-
-    assert cache_names == {ad_user['username']}
-
-    cache_names = set([x['name'] for x in call(
-        'group.query', [['local', '=', False]],
-    )])
-
-    assert cache_names == {ad_group['name']}
-
-
-def test_check_lazy_initialization_of_users_and_groups_by_id(do_ad_connection):
-    """
-    When users explicitly search for a directory service or other user
-    by name or id we should hit pwd and grp modules and synthesize a
-    result if the user / group is not in the cache. This special behavior
-    only occurs with a single filter of "name =" or "id =". So after the
-    initial query, which should result in insertion, we add a second filter
-    to only hit the cache. Code paths are slightly different for lookups
-    by id or by name and so they are tested separately.
-    """
-
-    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
-
-    cmd = 'rm -f /root/tdb/persistent/*'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-
-    call('user.query', [['uid', '=', ad_user['uid']]], {'get': True})
-
-    call('group.query', [['gid', '=', ad_group['gid']]], {'get': True})
-
-    cache_names = set([x['username'] for x in call(
-        'user.query', [['local', '=', False]],
-    )])
-
-    assert cache_names == {ad_user['username']}
-
-    cache_names = set([x['name'] for x in call(
-        'group.query', [['local', '=', False]],
-    )])
-
-    assert cache_names == {ad_group['name']}
-
-
-@pytest.mark.parametrize('op_type', ('UPDATE', 'DELETE'))
-def test_update_delete_failures(do_ad_connection, op_type):
-    ad_user, ad_group = get_ad_user_and_group(do_ad_connection)
-
-    for acct, prefix in ((ad_user, 'user'), (ad_group, 'group')):
-        with pytest.raises(CallError) as ce:
-            if op_type == 'UPDATE':
-                call(f'{prefix}.update', acct['id'], {'smb': False})
-            else:
-                call(f'{prefix}.delete', acct['id'])
-
-        assert ce.value.errno == errno.EPERM
diff --git a/tests/api2/test_070_alertservice.py b/tests/api2/test_070_alertservice.py
deleted file mode 100644
index 9540aaf651723..0000000000000
--- a/tests/api2/test_070_alertservice.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_alert_gets():
-    call("alertservice.query")
-
-
-def test_alertservice():
-    data = ["name", "type", "attributes", "level", "enabled"]
-
-    # create
-    payload = {
-        "name": "Critical Email Test",
-        "type": "Mail",
-        "attributes": {
-            "email": "eric.spam@ixsystems.com"
-        },
-        "level": "CRITICAL",
-        "enabled": True
-    }
-    results = call("alertservice.create", payload)
-    for key in data:
-        assert results[key] == payload[key]
-
-    alertservice_id = results['id']
-
-    # update
-    payload = {
-        "name": "Warning Email Test",
-        "type": "Mail",
-        "attributes": {
-            "email": "william.spam@ixsystems.com@"
-        },
-        "level": "WARNING",
-        "enabled": False
-    }
-    results = call(f"alertservice.update", alertservice_id, payload)
-    for key in data:
-        assert results[key] == payload[key]
-
-    # delete
-    call("alertservice.delete", alertservice_id)
-    assert call("alertservice.query", [["id", "=", alertservice_id]]) == []
diff --git a/tests/api2/test_110_certificate.py b/tests/api2/test_110_certificate.py
deleted file mode 100644
index 0b053b7772a3a..0000000000000
--- a/tests/api2/test_110_certificate.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import pytest
-import re
-
-from time import sleep
-from middlewared.test.integration.utils import call
-
-try:
-    from config import (
-        LDAPBASEDN,
-        LDAPBINDDN,
-        LDAPBINDPASSWORD,
-        LDAPHOSTNAME,
-    )
-except ImportError:
-    Reason = "LDAP* variable are not setup in config.py"
-    # comment pytestmark for development testing with --dev-test
-    pytestmark = pytest.mark.skipif(True, reason=Reason)
-
-
-def test_certificate():
-    # create an idmap entry that references certificate 1
-    payload = {
-        "name": "BOB",
-        "range_low": 1000,
-        "range_high": 2000,
-        "certificate": 1,
-        "idmap_backend": "RFC2307",
-        "options": {
-            "ldap_server": "STANDALONE",
-            "bind_path_user": LDAPBASEDN,
-            "bind_path_group": LDAPBASEDN,
-            "ldap_url": LDAPHOSTNAME,
-            "ldap_user_dn": LDAPBINDDN,
-            "ldap_user_dn_password": LDAPBINDPASSWORD,
-            "ssl": "ON",
-            "ldap_realm": False,
-        }
-    }
-    results = call("idmap.create", payload)
-    idmap_id = int(results["id"])
-    certificate_id = results["certificate"]["id"]
-
-    # attempt to delete the in-use certificate (the delete job should fail)
-    results = call("certificate.delete", certificate_id, True)
-    job_id = int(results)
-
-    # wait for the delete job and verify that it failed
-    while True:
-        get_job = call("core.get_jobs", [["id", "=", job_id]])
-        job_status = get_job[0]
-        if job_status["state"] in ("RUNNING", "WAITING"):
-            sleep(1)
-        else:
-            assert job_status["state"] == "FAILED", get_job
-            assert bool(re.search(
-                r"Certificate is being used by following service.*IDMAP", job_status["error"], flags=re.DOTALL
-            )) is True, job_status["error"]
-            break
-
-    # delete idmap
-    call("idmap.delete", idmap_id)
diff --git a/tests/api2/test_190_filesystem.py b/tests/api2/test_190_filesystem.py
deleted file mode 100644
index fd23fd648ba87..0000000000000
--- a/tests/api2/test_190_filesystem.py
+++ /dev/null
@@ -1,331 +0,0 @@
-import errno
-import stat
-import os
-from copy import deepcopy
-
-import pytest
-
-from auto_config import pool_name
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.filesystem import directory
-from middlewared.test.integration.assets.pool import dataset as create_dataset
-from middlewared.test.integration.utils import call, ssh
-from truenas_api_client import ClientException
-
-
-@pytest.mark.parametrize("spath", ("/boot/grub", "/root", "/bin", "/usr/bin"))
-def test_filesystem_stat_results_for_path_(spath):
-    results = call("filesystem.stat", spath)
-    for key in (
-        "allocation_size",
-        "size",
-        "mode",
-        "dev",
-        "inode",
-        "uid",
-        "gid",
-        "nlink",
-        "mount_id",
-        "dev",
-        "inode",
-    ):
-        assert isinstance(results[key], int)
-        if key in ("uid", "gid"):
-            assert results[key] == 0
-        elif key == "nlink":
-            assert -1 < results[key] < 10
-
-    for key in ("atime", "mtime", "ctime"):
-        assert isinstance(results[key], float)
-
-    for key in ("user", "group"):
-        assert results[key] == "root"
-
-    assert results["acl"] is False
-    if spath == "/bin":
-        assert results["type"] == "SYMLINK"
-        assert results["realpath"] == "/usr/bin"
-    else:
-        assert results["type"] == "DIRECTORY"
-        assert results["realpath"] == spath
-
-
-def test_filesystem_statfs_fstype():
-    parent_path = f"/mnt/{pool_name}"
-    data = call("filesystem.statfs", parent_path)
-    assert data["fstype"] == "zfs", data["fstype"]
-    nested_path = f"{parent_path}/tmpfs"
-    ssh(f"mkdir -p {nested_path}; mount -t tmpfs -o size=10M tmpfstest {nested_path}")
-    data = call("filesystem.statfs", nested_path)
-    assert data["fstype"] == "tmpfs", data["fstype"]
-    ssh(f"umount {nested_path}; rmdir {nested_path}")
-
-
-def test_immutable_flag():
-    t_path = os.path.join("/mnt", pool_name, "random_directory_immutable")
-    t_child_path = os.path.join(t_path, "child")
-    with directory(t_path) as d:
-        for flag_set in (True, False):
-            call('filesystem.set_zfs_attributes', {
-                'path': d,
-                'zfs_file_attributes': {'immutable': flag_set}
-            })
-            # We test 2 things:
-            # 1) Creating content under the parent path fails/succeeds based on flag_set
-            # 2) filesystem.stat reports the IMMUTABLE attribute accordingly
-            if flag_set:
-                with pytest.raises(PermissionError):
-                    call("filesystem.mkdir", f"{t_child_path}_{flag_set}")
-            else:
-                call("filesystem.mkdir", f"{t_child_path}_{flag_set}")
-
-            is_immutable = 'IMMUTABLE' in call('filesystem.stat', t_path)['attributes']
-            err = "Immutable flag is still not set"
-            if not flag_set:
-                err = "Immutable flag is still set"
-            assert is_immutable == flag_set, err
-
-
-def test_filesystem_listdir_exclude_non_mounts():
-    with directory("/mnt/random_dir"):
-        # exclude dirs at root of /mnt since this
-        # directory is used exclusively to mount zpools
-        for i in call("filesystem.listdir", "/mnt"):
-            assert i["name"] != "random_dir"
-
-
-def test_filesystem_stat_filetype():
-    """
-    This test checks that file types are properly
-    identified through the filesystem plugin in middleware.
-    There is an additional check to make sure that paths
-    in the ZFS CTL directory (.zfs) are properly flagged.
-    """
-    ds_name = "stat_test"
-    targets = ("file", "directory", "symlink", "other")
-    with create_dataset(ds_name) as ds:
-        base = f"/mnt/{ds}"
-        ssh(
-            " && ".join(
-                (
-                    f"mkdir {base}/directory",
-                    f"touch {base}/file",
-                    f"ln -s {base}/file {base}/symlink",
-                    f"mkfifo {base}/other",
-                )
-            )
-        )
-        for x in targets:
-            statout = call("filesystem.stat", f"{base}/{x}")
-            assert statout["type"] == x.upper()
-            assert not statout["is_ctldir"]
-
-        snap_name = f"{ds_name}_snap1"
-        call(
-            "zfs.snapshot.create",
-            {
-                "dataset": ds,
-                "name": snap_name,
-                "recursive": False,
-            },
-        )
-        for x in targets:
-            target = f"{base}/.zfs/snapshot/{snap_name}/{x}"
-            statout = call("filesystem.stat", target)
-            assert statout["type"] == x.upper()
-            assert statout["is_ctldir"]
-
-        assert call("filesystem.stat", f"{base}/.zfs/snapshot/{snap_name}")["is_ctldir"]
-        assert all(
-            dirent["is_ctldir"]
-            for dirent in call(
-                "filesystem.listdir",
-                f"{base}/.zfs/snapshot",
-                [],
-                {"select": ["name", "is_ctldir"]},
-            )
-        )
-        assert call("filesystem.stat", f"{base}/.zfs/snapshot")["is_ctldir"]
-        assert all(
-            dirent["is_ctldir"]
-            for dirent in call(
-                "filesystem.listdir",
-                f"{base}/.zfs",
-                [],
-                {"select": ["name", "is_ctldir"]},
-            )
-        )
-        assert call("filesystem.stat", f"{base}/.zfs")["is_ctldir"]
-
-
-def test_filesystem_statfs_flags():
-    """
-    This test verifies that changing ZFS properties via
-    middleware causes mountinfo changes visible via statfs.
-    """
-    properties = (
-        # tuple: ZFS property name, property value, mountinfo value
-        ("readonly", "ON", "RO"),
-        ("readonly", "OFF", "RW"),
-        ("atime", "OFF", "NOATIME"),
-        ("exec", "OFF", "NOEXEC"),
-        ("acltype", "NFSV4", "NFS4ACL"),
-        ("acltype", "POSIX", "POSIXACL"),
-    )
-    with create_dataset("statfs_test") as ds:
-        base = f"/mnt/{ds}"
-        for p in properties:
-            # set option we're checking and make sure it's really set
-            payload = {p[0]: p[1]}
-            if p[0] == "acltype":
-                payload.update(
-                    {"aclmode": "RESTRICTED" if p[1] == "NFSV4" else "DISCARD"}
-                )
-            assert call("pool.dataset.update", ds, payload)[p[0]]["value"] == p[1]
-
-            # check statfs results
-            mount_flags = call("filesystem.statfs", base)["flags"]
-            assert p[2] in mount_flags, f"{base}: ({p[2]}) not in {mount_flags}"
-
-
-def test_dosmodes():
-    modes = ("readonly", "hidden", "system", "archive", "offline", "sparse")
-    with create_dataset("dosmode_test") as ds:
-        base = f"/mnt/{ds}"
-        testpaths = (f"{base}/testfile", f"{base}/testdir")
-        ssh(f"touch {testpaths[0]}; mkdir {testpaths[1]}")
-        for p in testpaths:
-            expected_flags = call("filesystem.get_zfs_attributes", p)
-            for m in modes:
-                to_set = {m: not expected_flags[m]}
-                res = call(
-                    "filesystem.set_zfs_attributes",
-                    {"path": p, "zfs_file_attributes": to_set},
-                )
-                expected_flags.update(to_set)
-                assert expected_flags == res
-                res = call("filesystem.get_zfs_attributes", p)
-                assert expected_flags == res
-
-
-def test_acl_path_execute_validation():
-    perm = {"BASIC": "FULL_CONTROL"}
-    flag = {"BASIC": "INHERIT"}
-    NFSV4_DACL = [
-        {"tag": "owner@", "id": -1, "type": "ALLOW", "perms": perm, "flags": flag},
-        {"tag": "group@", "id": -1, "type": "ALLOW", "perms": perm, "flags": flag},
-        {"tag": "USER", "id": 65534, "type": "ALLOW", "perms": perm, "flags": flag},
-        {"tag": "GROUP", "id": 65534, "type": "ALLOW", "perms": perm, "flags": flag},
-    ]
-    with create_dataset(
-        "acl_execute_test",
-        data={"acltype": "NFSV4", "aclmode": "PASSTHROUGH"},
-        mode="770",
-    ) as ds:
-        path = f"/mnt/{ds}"
-        """
-        For NFSv4 ACLs four different tags generate user tokens differently:
-        1) owner@ tag will test `uid` from payload
-        2) group@ tag will test `gid` from payload
-        3) GROUP will test the `id` in payload with GROUP id_type
-        4) USER will test the `id` in payload with USER id_type
-        """
-        # Start with testing denials
-        with create_dataset(
-            "acl_execute_test/sub", data={"acltype": "NFSV4", "aclmode": "PASSTHROUGH"}
-        ) as sub_ds:
-            sub_path = f"/mnt/{sub_ds}"
-            acl = deepcopy(NFSV4_DACL)
-            names = ("daemon", "apps", "nobody", "nogroup")
-            for idx, entry in enumerate(NFSV4_DACL):
-                with pytest.raises(ClientException, match=f"{names[idx]}"):
-                    # all of these tests should fail and the user account
-                    # should be in the error message raised
-                    call(
-                        "filesystem.setacl",
-                        {"path": sub_path, "dacl": acl, "uid": 1, "gid": 568},
-                        job=True,
-                    )
-                acl.pop(0)
-
-            # when this test starts, we have 770 perms on parent
-            for entry in NFSV4_DACL:
-                # first set permissions on parent dataset
-                if entry["tag"] == "owner@":
-                    call(
-                        "filesystem.chown", {"path": path, "uid": 1, "gid": 0}, job=True
-                    )
-                elif entry["tag"] == "group@":
-                    call(
-                        "filesystem.chown",
-                        {"path": path, "uid": 0, "gid": 568},
-                        job=True,
-                    )
-                elif entry["tag"] == "USER":
-                    call(
-                        "filesystem.setacl",
-                        {"path": path, "uid": 0, "gid": 0, "dacl": [entry]},
-                        job=True,
-                    )
-                elif entry["tag"] == "GROUP":
-                    call(
-                        "filesystem.setacl",
-                        {"path": path, "uid": 0, "gid": 0, "dacl": [entry]},
-                        job=True,
-                    )
-
-                # Now set the acl on child dataset. This should succeed
-                call(
-                    "filesystem.setacl",
-                    {"path": sub_path, "uid": 1, "gid": 568, "dacl": [entry]},
-                    job=True,
-                )
-
-
-@pytest.fixture(scope="module")
-def file_and_directory():
-    with create_dataset("test_file_and_directory") as ds:
-        ssh(f"mkdir /mnt/{ds}/test-directory; touch /mnt/{ds}/test-file")
-        yield ds
-
-
-@pytest.mark.parametrize(
-    "query,result",
-    [
-        ([], {"test-directory", "test-file"}),
-        ([["type", "=", "DIRECTORY"]], {"test-directory"}),
-        ([["type", "!=", "DIRECTORY"]], {"test-file"}),
-        ([["type", "=", "FILE"]], {"test-file"}),
-        ([["type", "!=", "FILE"]], {"test-directory"}),
-    ],
-)
-def test_type_filter(file_and_directory, query, result):
-    listdir = call("filesystem.listdir", f"/mnt/{file_and_directory}", query)
-    assert {item["name"] for item in listdir} == result, listdir
-
-
-def test_mkdir_mode():
-    with create_dataset("test_mkdir_mode") as ds:
-        testdir = os.path.join("/mnt", ds, "testdir")
-        call("filesystem.mkdir", {"path": testdir, "options": {"mode": "777"}})
-        st = call("filesystem.stat", testdir)
-        assert stat.S_IMODE(st["mode"]) == 0o777
-
-
-def test_mkdir_chmod_failure():
-    with create_dataset("test_mkdir_chmod", {"share_type": "SMB"}) as ds:
-        testdir = os.path.join("/mnt", ds, "testdir")
-        with pytest.raises(PermissionError):
-            call("filesystem.mkdir", {"path": testdir, "options": {"mode": "777"}})
-        with pytest.raises(CallError) as ce:
-            call("filesystem.stat", testdir)
-        assert ce.value.errno == errno.ENOENT
-        mkdir_st = call(
-            "filesystem.mkdir",
-            {"path": testdir, "options": {"mode": "777", "raise_chmod_error": False}},
-        )
-        st = call("filesystem.stat", testdir)
-        # Verify that mode output returned from mkdir matches what was actually set
-        assert st["mode"] == mkdir_st["mode"]
-        # mkdir succeeded, but chmod failed so we get mode based on inherited ACL (SMB preset)
-        assert stat.S_IMODE(st["mode"]) == 0o770
diff --git a/tests/api2/test_200_ftp.py b/tests/api2/test_200_ftp.py
deleted file mode 100644
index 8c650206a56cc..0000000000000
--- a/tests/api2/test_200_ftp.py
+++ /dev/null
@@ -1,1404 +0,0 @@
-import contextlib
-import copy
-import json
-import os
-import subprocess
-from ftplib import all_errors, error_temp
-from time import sleep
-from timeit import default_timer as timer
-from types import SimpleNamespace
-
-import pytest
-from pytest_dependency import depends
-
-from assets.websocket.server import reboot
-from middlewared.test.integration.assets.account import user as ftp_user
-from middlewared.test.integration.assets.pool import dataset as dataset_asset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-from auto_config import password, pool_name, user
-from functions import SSH_TEST, send_file
-from protocols import ftp_connect, ftp_connection, ftps_connection
-
-FTP_DEFAULT = {}
-DB_DFLT = {}
-INIT_DIRS_AND_FILES = {
-    'path': None,
-    'dirs': [
-        {'name': 'init_dir'},
-        {'name': 'init_ro_dir', 'perm': '-w',
-         'contents': ["ReadOnlyDir_file1", "ReadOnlyDir_file2"]}
-    ],
-    'files': [{'name': 'init_file', 'contents': "Contents of init_file"},
-              {'name': 'init_ro_file', 'contents': "RO data", 'perm': '-w'}],
-}
-
-
-# ================= Utility Functions ==================
-
-
-@pytest.fixture(scope='module')
-def ftp_init_db_dflt():
-    # Get the 'default' settings from FTPModel
-    ftpconf_script = '#!/usr/bin/python3\n'
-    ftpconf_script += 'import json\n'
-    ftpconf_script += 'from middlewared.plugins.ftp import FTPModel\n'
-    ftpconf_script += 'FTPModel_defaults = {}\n'
-    ftpconf_script += 'for attrib in FTPModel.__dict__.keys():\n'
-    ftpconf_script += '    if attrib[:4] == "ftp_":\n'
-    ftpconf_script += '        try:\n'
-    ftpconf_script += '            val = getattr(getattr(FTPModel, attrib), "default").arg\n'
-    ftpconf_script += '        except AttributeError:\n'
-    ftpconf_script += '            val = None\n'
-    ftpconf_script += '        if not callable(val):\n'
-    ftpconf_script += '            FTPModel_defaults[attrib] = val\n'
-    ftpconf_script += 'print(json.dumps(FTPModel_defaults))\n'
-    with open('ftpconf.py', 'w') as cmd_file:
-        cmd_file.write(ftpconf_script)
-    results = send_file('ftpconf.py', 'ftpconf.py', user, password, truenas_server.ip)
-    assert results['result'], str(results['output'])
-    rv_defaults = SSH_TEST("python3 ftpconf.py", user, password)
-    assert rv_defaults['result'], str(rv_defaults)
-    global FTP_DEFAULT
-    FTP_DEFAULT = json.loads(rv_defaults['stdout'].strip())
-
-    # clean up the temporary script (local and remote copies)
-    os.remove('ftpconf.py')
-    results = SSH_TEST('rm ftpconf.py', user, password)
-    assert results['result'] is True, results
-
-    # Special case: the default banner is stored in a file (see proftpd.conf.mako)
-    assert FTP_DEFAULT['ftp_banner'] is None, FTP_DEFAULT['ftp_banner']
-
-    # Make the default model keys match the DB names
-    global DB_DFLT
-    DB_DFLT = {k.replace('ftp_', ''): FTP_DEFAULT[k] for k in FTP_DEFAULT}
-    return DB_DFLT
-
-
-def ftp_set_config(config=None):
-    # Fixup some settings
-    if config:
-        tmpconf = config.copy()
-        if 'banner' in tmpconf and tmpconf['banner'] is None:
-            tmpconf['banner'] = ""
-        if 'anonpath' in tmpconf and tmpconf['anonpath'] is False:
-            tmpconf['anonpath'] = ""
-        if 'masqaddress' in tmpconf and tmpconf['masqaddress'] is None:
-            tmpconf['masqaddress'] = ''
-        if 'ssltls_certificate_id' in tmpconf and tmpconf['ssltls_certificate_id'] is None:
-            tmpconf.pop('ssltls_certificate_id')
-        if 'options' in tmpconf and tmpconf['options'] is None:
-            tmpconf['options'] = ''
-        call('ftp.update', tmpconf)
-
-
-def parse_conf_file(file='proftpd'):
-    results = SSH_TEST(f"cat /etc/proftpd/{file}.conf", user, password)
-    assert results['result'], str(results)
-    lines = results['stdout'].splitlines()
-
-    rv = {}
-    context = [{'server': None}]
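-    # `context` is a stack of the enclosing <Section value> blocks; each parsed
-    # directive stores a snapshot of this stack so callers can check where in
-    # the file a directive appeared (e.g. inside <Limit LOGIN>).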
-    for line in lines:
-        line = line.lstrip()
-        if not line or line.startswith('#'):
-            continue
-
-        # Keep track of contexts
-        if line.startswith('<'):
-            if line[1] == "/":
-                context.pop()
-                continue
-            else:
-                c = line.split()[0][1:]
-                v = line.split()[1][:-1] if len(line.split()) > 1 else None
-                context.append({c: v})
-                continue
-
-        # Process the directive
-        if 1 < len(line.strip().split()):
-            # Trap TransferRate directive
-            if "TransferRate" == line.split()[0]:
-                tmp = line.split()
-                directive = ' '.join(tmp[:2])
-                value = ' '.join(tmp[2:])
-            else:
-                directive, value = line.strip().split(maxsplit=1)
-        else:
-            directive = line.strip()
-            value = None
-        entry = {directive: [copy.deepcopy(context), value]}
-        rv.update(entry)
-    return rv
-
-
-def query_ftp_service():
-    return call('service.query', [['service', '=', 'ftp']], {'get': True})
-
-
-def validate_proftp_conf():
-    '''
-    Confirm FTP configuration settings
-    NB: Avoid calling this for localuser* and anonuser* in the same test
-    '''
-    xlat = {True: "on", False: "off"}
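-    # proftpd renders boolean directives as "on"/"off"; use this map when
-    # comparing middleware config booleans against the parsed file.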
-    # Retrieve result from the database
-    ftpConf = call('ftp.config')
-    parsed = parse_conf_file('proftpd')
-
-    # Sanity spot check settings in proftpd.conf
-    assert ftpConf['port'] == int(parsed['Port'][1])
-    assert ftpConf['clients'] == int(parsed['MaxClients'][1]), f"\nftpConf={ftpConf}\nparsed={parsed}"
-    assert ftpConf['ipconnections'] == int(parsed['MaxConnectionsPerHost'][1])
-    assert ftpConf['loginattempt'] == int(parsed['MaxLoginAttempts'][1])
-    assert ftpConf['timeout'] == int(parsed['TimeoutIdle'][1])
-    assert ftpConf['timeout_notransfer'] == int(parsed['TimeoutNoTransfer'][1])
-
-    # Confirm that rootlogin has been removed.
-    assert ftpConf.get('rootlogin') is None
-
-    if ftpConf['onlyanonymous']:
-        assert 'User' in parsed
-        assert ftpConf['anonpath'] == parsed['User'][0][1]['Anonymous'], f"parsed['User'] = {parsed['User']}"
-        assert parsed['UserAlias'][1] == 'anonymous ftp'
-        assert parsed['Group'][1] == 'ftp'
-        assert 'LOGIN' == parsed['AllowAll'][0][2]['Limit'], \
-            f"AllowAll must be within <imit LOGIN>, {parsed['AllowAll']}"
-    else:
-        assert parsed['User'][1] == 'nobody'
-
-    if ftpConf['onlylocal']:
-        assert 'AllowAll' in parsed
-        assert 'LOGIN' == parsed['AllowAll'][0][1]['Limit'], \
-            f"AllowAll must be within <imit LOGIN>, {parsed['AllowAll']}"
-    else:
-        if not ftpConf['onlyanonymous']:
-            assert 'AllowAll' not in parsed
-
-    # The absence of onlyanonymous and onlylocal means some settings are present
-    if not (ftpConf['onlyanonymous'] or ftpConf['onlylocal']):
-        assert 'DenyAll' in parsed
-        assert 'LOGIN' == parsed['DenyAll'][0][1]['Limit']
-        # Confirm rootlogin has been removed.
-        assert 'root' not in parsed['AllowGroup']
-
-    # The banner is saved to a file
-    rv_motd = SSH_TEST("cat /etc/proftpd/proftpd.motd", user, password)
-    assert rv_motd['result'], str(rv_motd)
-    motd = rv_motd['stdout'].strip()
-    if ftpConf['banner']:
-        assert motd == ftpConf['banner'], f"\nproftpd.motd = \'{motd}\'\nbanner = \'{ftpConf['banner']}\'"
-
-    expect_umask = f"{ftpConf['filemask']} {ftpConf['dirmask']}"
-    assert expect_umask == parsed['Umask'][1], \
-        f"Found unexpected Umask entry: expected '{expect_umask}', found '{parsed['Umask'][1]}'"
-    assert xlat[ftpConf['fxp']] == parsed['AllowForeignAddress'][1]
-    if ftpConf['resume']:
-        assert xlat[ftpConf['resume']] == parsed['AllowRetrieveRestart'][1]
-        assert xlat[ftpConf['resume']] == parsed['AllowStoreRestart'][1]
-
-    # The DefaultRoot setting is defined completely in proftpd.conf.mako as '~ !root'
-    if ftpConf['defaultroot']:
-        assert parsed['DefaultRoot'][1] == "~ !root"
-
-    assert xlat[ftpConf['ident']] == parsed['IdentLookups'][1]
-    assert xlat[ftpConf['reversedns']] == parsed['UseReverseDNS'][1]
-
-    if ftpConf['masqaddress']:
-        assert ftpConf['masqaddress'] == parsed['MasqueradeAddress'][1]
-
-    if ftpConf['passiveportsmin']:
-        expect_setting = f"{ftpConf['passiveportsmin']} {ftpConf['passiveportsmax']}"
-        assert expect_setting == parsed['PassivePorts'][1], \
-            f"Found unexpected PassivePorts entry: expected '{expect_setting}', found '{parsed['PassivePorts'][1]}'"
-
-    if ftpConf['localuserbw']:
-        assert ftpConf['localuserbw'] == int(parsed['TransferRate STOR'][1])
-    if ftpConf['localuserdlbw']:
-        assert ftpConf['localuserdlbw'] == int(parsed['TransferRate RETR'][1])
-    if ftpConf['anonuserbw']:
-        assert ftpConf['anonuserbw'] == int(parsed['TransferRate STOR'][1])
-    if ftpConf['anonuserdlbw']:
-        assert ftpConf['anonuserdlbw'] == int(parsed['TransferRate RETR'][1])
-
-    if ftpConf['tls']:
-        parsed = parsed | parse_conf_file('tls')
-
-        # These two are 'fixed' settings in proftpd.conf.mako, but they are important
-        assert parsed['TLSEngine'][1] == 'on'
-        assert parsed['TLSProtocol'][1] == 'TLSv1.2 TLSv1.3'
-
-        if 'TLSOptions' in parsed:
-            # Following the same method from proftpd.conf.mako
-            tls_options = []
-            for k, v in [
-                ('allow_client_renegotiations', 'AllowClientRenegotiations'),
-                ('allow_dot_login', 'AllowDotLogin'),
-                ('allow_per_user', 'AllowPerUser'),
-                ('common_name_required', 'CommonNameRequired'),
-                ('enable_diags', 'EnableDiags'),
-                ('export_cert_data', 'ExportCertData'),
-                ('no_empty_fragments', 'NoEmptyFragments'),
-                ('no_session_reuse_required', 'NoSessionReuseRequired'),
-                ('stdenvvars', 'StdEnvVars'),
-                ('dns_name_required', 'dNSNameRequired'),
-                ('ip_address_required', 'iPAddressRequired'),
-            ]:
-                if ftpConf[f'tls_opt_{k}']:
-                    tls_options.append(v)
-
-            assert set(tls_options) == set(parsed['TLSOptions'][1].split()), \
-                f"--- Unexpected difference ---\ntls_options:\n{set(tls_options)}"\
-                f"\nparsed['TLSOptions']\n{set(parsed['TLSOptions'][1].split())}"
-        assert ftpConf['tls_policy'] == parsed['TLSRequired'][1]
-        # Do a sanity check on the certificate entries
-        assert 'TLSRSACertificateFile' in parsed
-        assert 'TLSRSACertificateKeyFile' in parsed
-    # Return the current config and welcome message
-    return ftpConf, motd
-
-
-@contextlib.contextmanager
-def ftp_configure(changes=None):
-    '''
-    Apply requested FTP configuration changes.
-    Restore original setting when done
-    '''
-    changes = changes or {}
-    ftpConf = call('ftp.config')
-    restore_keys = set(ftpConf) & set(changes)
-    restore_items = {key: ftpConf[key] for key in restore_keys}
-    if changes:
-        try:
-            call('ftp.update', changes)
-            yield
-        finally:
-            # Restore settings
-            call('ftp.update', restore_items)
-            # Validate the restore
-            validate_proftp_conf()
-
-
-def ftp_set_service_enable_state(state=None):
-    '''
-    Save and return the current 'enable' setting
-    Set the requested state
-    '''
-    restore_setting = None
-    if state is not None:
-        assert isinstance(state, bool)
-        # save current setting
-        restore_setting = query_ftp_service()['enable']
-        # update to requested setting
-        call('service.update', 'ftp', {'enable': state})
-
-    return restore_setting
-
-
-@contextlib.contextmanager
-def ftp_server(service_state=None):
-    '''
-    Start FTP server with current config
-    Stop server when done
-    '''
-    # service 'enable' state
-    if service_state is not None:
-        restore_state = ftp_set_service_enable_state(service_state)
-
-    try:
-        # Start FTP service
-        call('service.start', 'ftp', {'silent': False})
-        yield
-    finally:
-        # proftpd can core dump if stopped while it's busy
-        # processing a prior config change. Give it a sec.
-        sleep(1)
-        call('service.stop', 'ftp', {'silent': False})
-        # Restore original service state
-        if service_state is not None:
-            ftp_set_service_enable_state(restore_state)
-
-
-@contextlib.contextmanager
-def ftp_anon_ds_and_srvr_conn(dsname='ftpdata', FTPconfig=None, useFTPS=None, withConn=None, **kwargs):
-    FTPconfig = FTPconfig or {}
-    withConn = True if withConn is None else withConn
-
-    with dataset_asset(dsname, **kwargs) as ds:
-        ds_path = f"/mnt/{ds}"
-
-        # Add files and dirs
-        ftp_dirs_and_files = INIT_DIRS_AND_FILES.copy()
-        ftp_dirs_and_files['path'] = ds_path
-        ftp_init_dirs_and_files(ftp_dirs_and_files)
-
-        with ftp_server():
-            anon_config = {
-                "onlyanonymous": True,
-                "anonpath": ds_path,
-                "onlylocal": False,
-                **FTPconfig
-            }
-            with ftp_configure(anon_config):
-                ftpConf, motd = validate_proftp_conf()
-                if withConn:
-                    with (ftps_connection if useFTPS else ftp_connection)(truenas_server.ip) as ftp:
-                        yield SimpleNamespace(ftp=ftp, dirs_and_files=ftp_dirs_and_files,
-                                              ftpConf=ftpConf, motd=motd)
-
-
-@contextlib.contextmanager
-def ftp_user_ds_and_srvr_conn(dsname='ftpdata', username="FTPlocal", FTPconfig=None, useFTPS=False, **kwargs):
-    FTPconfig = FTPconfig or {}
-
-    with dataset_asset(dsname, **kwargs) as ds:
-        ds_path = f"/mnt/{ds}"
-        with ftp_user({
-            "username": username,
-            "group_create": True,
-            "home": ds_path,
-            "full_name": username + " User",
-            "password": "secret",
-            "home_create": False,
-            "smb": False,
-            "groups": [call('group.query', [['name', '=', 'ftp']], {'get': True})['id']],
-        }):
-            # Add dirs and files
-            ftp_dirs_and_files = INIT_DIRS_AND_FILES.copy()
-            ftp_dirs_and_files['path'] = ds_path
-            ftp_init_dirs_and_files(ftp_dirs_and_files)
-
-            with ftp_server():
-                with ftp_configure(FTPconfig):
-                    ftpConf, motd = validate_proftp_conf()
-                    with (ftps_connection if useFTPS else ftp_connection)(truenas_server.ip) as ftp:
-                        yield SimpleNamespace(ftp=ftp, dirs_and_files=ftp_dirs_and_files, ftpConf=ftpConf, motd=motd)
-
-
-def ftp_get_users():
-    '''
-    Return a list of active users
-    NB: ftp service should be running when called
-    '''
-    ssh_out = SSH_TEST("ftpwho -o json", user, password)
-    assert ssh_out['result'], str(ssh_out)
-    output = ssh_out['output']
-    # Strip off trailing bogus data
-    joutput = output[:output.rindex('}') + 1]
-    whodata = json.loads(joutput)
-    return whodata['connections']
-
-
-# For resume xfer test
-def upload_partial(ftp, src, tgt, NumKiB=128):
-    with open(src, 'rb') as file:
-        ftp.voidcmd('TYPE I')
-        with ftp.transfercmd(f'STOR {os.path.basename(tgt)}', None) as conn:
-            blksize = NumKiB // 8
-            for xfer in range(0, 8):
-                # Send some of the file
-                buf = file.read(1024 * blksize)
-                assert buf, "Unexpected local read error"
-                conn.sendall(buf)
-
-
-def download_partial(ftp, src, tgt, NumKiB=128):
-    with open(tgt, 'wb') as file:
-        ftp.voidcmd('TYPE I')
-        with ftp.transfercmd(f'RETR {os.path.basename(src)}', None) as conn:
-            NumXfers = NumKiB // 8
-            for xfer in range(0, NumXfers):
-                # Receive and write some of the file
-                data = conn.recv(8192)
-                assert data, "Unexpected receive error"
-                file.write(data)
-
-
-def ftp_upload_binary_file(ftpObj, source, target, offset=None):
-    """
-    Upload a file to the FTP server
-    INPUT:
-        source is the full-path to local file
-        target is the name to use on the FTP server
-    RETURN:
-        Elapsed time to upload file
-
-    """
-    assert ftpObj is not None
-    assert source is not None
-    assert target is not None
-
-    with open(source, 'rb') as fp:
-        if offset:
-            fp.seek(offset)
-        start = timer()
-        ftpObj.storbinary(f'STOR {os.path.basename(target)}', fp, rest=offset)
-        et = timer() - start
-        return et
-
-
-def ftp_download_binary_file(ftpObj, source, target, offset=None):
-    """
-    Download a file from the FTP server
-    INPUT:
-        source is the name of the file on the FTP server
-        target is full-path name on local host
-    RETURN:
-        Elapsed time to download file
-    """
-    assert ftpObj is not None
-    assert source is not None
-    assert target is not None
-    opentype = 'ab' if offset else 'wb'
-
-    with open(target, opentype) as fp:
-        start = timer()
-        ftpObj.retrbinary(f'RETR {os.path.basename(source)}', fp.write, rest=offset)
-        et = timer() - start
-        return et
-
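-# Both helpers above rely on FTP's REST mechanism for resumed transfers:
-# passing rest=offset makes ftplib issue 'REST <offset>' before STOR/RETR, so
-# the server continues the transfer from that byte. The upload seeks the local
-# source file to the same offset, and the download opens the local target in
-# append mode, so the two halves of the file join up.
-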
-
-def ftp_create_local_file(LocalPathName="", content=None):
-    '''
-    Create a local file
-    INPUT:
-        If 'content' is:
-        - None, then create with touch
-        - 'int', then it represents the size in KiB to fill with random data
-        - 'str', then write that to the file
-        If 'content' is not None, an 'int', or a 'str', then fail with an assert
-    RETURN:
-        tuple: (size_in_bytes, sha256_checksum)
-    '''
-    assert LocalPathName != "", "empty file name"
-    b = 'b' if isinstance(content, int) else ''
-    # Create a local file
-    with open(LocalPathName, 'w' + b) as f:
-        if (content is None) or isinstance(content, str):
-            content = content or ""
-            f.write(content)
-        elif isinstance(content, int):
-            f.write(os.urandom(1024 * content))
-        else:
-            assert False, f"Cannot create with content: '{content}'"
-    # Confirm existence
-    assert os.path.exists(LocalPathName)
-    localsize = os.path.getsize(LocalPathName)
-
-    res = subprocess.run(["sha256sum", LocalPathName], capture_output=True)
-    local_chksum = res.stdout.decode().split()[0]
-    return (localsize, local_chksum)
-
-
-def ftp_create_remote_file(RemotePathName="", content=None):
-    '''
-    Create a remote file
-    INPUT:
-        If 'content' is:
-        - None, then create with touch
-        - 'int', then it represents the size in KiB to fill with random data
-        - 'str', then write that to the file
-        If 'content' is not None, an 'int', or a 'str', then fail with an assert
-    RETURN:
-        tuple: (size_in_bytes, sha256_checksum)
-    '''
-    assert RemotePathName != "", "empty file name"
-    if content is None:
-        ssh(f'touch {RemotePathName}')
-    elif isinstance(content, int):
-        ssh(f"dd if=/dev/urandom of={RemotePathName} bs=1K count={content}", complete_response=True)
-    elif isinstance(content, str):
-        ssh(f'echo "{content}" > {RemotePathName}')
-    else:
-        assert False, f"Cannot create with content: '{content}'"
-
-    # Get and return the details
-    remotesize = ssh(f"du -b {RemotePathName}").split()[0]
-    remote_chksum = ssh(f"sha256sum {RemotePathName}").split()[0]
-    return (remotesize, remote_chksum)
-
-
-def ftp_init_dirs_and_files(items=None):
-    if items is not None:
-        assert items['path'] is not None
-        path = items['path']
-        for d in items['dirs']:
-            res = SSH_TEST(f"mkdir -p {path}/{d['name']}", user, password)
-            assert res['result'], str(res)
-            thispath = f"{path}/{d['name']}"
-            if 'contents' in d:
-                for f in d['contents']:
-                    res = SSH_TEST(f"touch {thispath}/{f}", user, password)
-                    assert res['result'], str(res)
-            if 'perm' in d:
-                res = SSH_TEST(f"chmod {d['perm']} {thispath}", user, password)
-                assert res['result'], str(res)
-
-        for f in items['files']:
-            res = SSH_TEST(f"echo \'{f['contents']}\' > \'{path}/{f['name']}\'", user, password)
-            assert res['result'], str(res)
-            if 'perm' in f:
-                res = SSH_TEST(f"chmod {f['perm']} {path}/{f['name']}", user, password)
-                assert res['result'], str(res)
-
-
-def init_test_data(type='unknown', data=None):
-    assert data is not None
-    new_test_data = {}
-    new_test_data['type'] = type
-    new_test_data['ftp'] = data.ftp
-    new_test_data['ftpConf'] = data.ftpConf
-    new_test_data['motd'] = data.motd
-    new_test_data['dirs_and_files'] = data.dirs_and_files
-    return new_test_data
-
-
-def ftp_ipconnections_test(test_data=None, *extra):
-    '''
-    Test FTP MaxConnectionsPerHost conf setting.
-    The DB equivalent is ipconnections.
-    NB1: This is called with an existing connection
-    '''
-    assert test_data['ftp'] is not None
-    ftpConf = test_data['ftpConf']
-    ConnectionLimit = int(ftpConf['ipconnections'])
-    # We already have one connection
-    NumConnects = 1
-    NewConnects = []
-    while NumConnects < ConnectionLimit:
-        try:
-            ftpConn = ftp_connect(truenas_server.ip)
-        except all_errors as e:
-            assert False, f"Unexpected connection error: {e}"
-        NewConnects.append(ftpConn)
-        NumConnects += 1
-        CurrentFtpUsers = ftp_get_users()
-        assert len(CurrentFtpUsers) == ConnectionLimit
-    try:
-        # This next connect should fail
-        ftp_connect(truenas_server.ip)
-    except all_errors as e:
-        # An expected error
-        assert NumConnects == ConnectionLimit
-        assert e.args[0].startswith('530')
-        assert f"maximum number of connections ({ConnectionLimit})" in e.args[0]
-    finally:
-        # Clean up extra connections
-        for conn in NewConnects:
-            conn.quit()
-
-
-def ftp_dir_listing_test(test_data=None, *extra):
-    '''
-    Get a directory listing
-    '''
-
-    assert test_data is not None
-    ftp = test_data['ftp']
-    listing = [name for name, facts in list(ftp.mlsd())]
-    expected = test_data['dirs_and_files']
-    # Get expected
-    for f in expected['files']:
-        assert f['name'] in listing, f"Did not find {f['name']}"
-    for d in expected['dirs']:
-        assert d['name'] in listing, f"Did not find {d['name']}"
-
-
-def ftp_download_files_test(test_data=None, run_data=None):
-    '''
-    Retrieve files from server and confirm contents
-    '''
-
-    assert test_data is not None
-    ftp = test_data['ftp']
-    expected_contents = None
-    for f in run_data:
-        if f['contents'] is None:
-            continue
-        expected_contents = f['contents']
-        found_contents = []
-        cmd = f"RETR {f['name']}"
-        try:
-            res = ftp.retrlines(cmd, found_contents.append)
-            assert f['expect_to_pass'] is True, \
-                f"Expected file download failure for {f['name']}, but passed: {f}"
-            assert res.startswith('226 Transfer complete'), "Detected download failure"
-            assert expected_contents in found_contents
-        except all_errors as e:
-            assert f['expect_to_pass'] is False, \
-                f"Expected file download success for {f['name']}, but failed: {e.args}"
-
-
-def ftp_upload_files_test(test_data=None, run_data=None):
-    '''
-    Upload files to the server
-    '''
-    localfile = "/tmp/ftpfile"
-
-    assert test_data is not None
-    assert run_data != []
-    ftp = test_data['ftp']
-    try:
-        for f in run_data:
-            if 'content' in f and isinstance(f['content'], str):
-                ftp_create_local_file(localfile, f['content'])
-            with open(localfile, 'rb') as tmpfile:
-                try:
-                    cmd = f"STOR {f['name']}"
-                    res = ftp.storlines(cmd, tmpfile)
-                    assert f['expect_to_pass'] is True, \
-                        f"Expected file add failure for {f['name']}, but passed: {f}"
-                    assert res.startswith('226 Transfer complete'), "Detected upload failure"
-                except all_errors as e:
-                    assert f['expect_to_pass'] is False, \
-                        f"Expected file add success for {f['name']}, but failed: {e.args}"
-    finally:
-        # Clean up
-        if os.path.exists(localfile):
-            os.remove(localfile)
-
-
-def ftp_delete_files_test(test_data=None, run_data=None):
-    '''
-    Delete files on the server
-    '''
-    assert test_data is not None
-    assert run_data != []
-    ftp = test_data['ftp']
-    for f in run_data:
-        try:
-            ftp.delete(f['name'])
-            assert f['expect_to_pass'] is True, \
-                f"Expected file delete failure for {f['name']}, but passed: {f}"
-        except all_errors as e:
-            assert f['expect_to_pass'] is False, \
-                f"Expected file delete success for {f['name']}, but failed: {e.args}"
-
-
-def ftp_add_dirs_test(test_data=None, run_data=None):
-    '''
-    Create directories on the server
-    '''
-    assert test_data is not None
-    assert run_data != []
-    ftp = test_data['ftp']
-    for d in run_data:
-        try:
-            res = ftp.mkd(d['name'])
-            assert d['name'] in res
-        except all_errors as e:
-            assert d['expect_to_pass'] is False, \
-                f"Expected directory creation success for {d['name']}, but failed: {e.args}"
-
-
-def ftp_remove_dirs_test(test_data=None, run_data=None):
-    '''
-    Delete directories on the server
-    '''
-    assert test_data is not None
-    assert run_data != []
-    ftp = test_data['ftp']
-    for d in run_data:
-        try:
-            ftp.rmd(d['name'])
-            assert d['expect_to_pass'] is True, \
-                f"Expected deletion failure for {d['name']}, but passed: {d}"
-        except all_errors as e:
-            assert d['expect_to_pass'] is False, \
-                f"Expected deletion success for {d['name']}, but failed: {e.args}"
-
-#
-# ================== TESTS =========================
-#
-
-
-@pytest.mark.dependency(name='init_dflt_config')
-def test_001_validate_default_configuration(request, ftp_init_db_dflt):
-    '''
-    Confirm the 'default' settings in the DB are in sync with what
-    is specified in the FTPModel class.  These can get out of sync
-    with migration code.
-    NB1: This expects FTP to be in the default configuration
-    '''
-    ftp_set_config(DB_DFLT)
-
-    with ftp_server():
-        # Get the DB settings
-        db = call('ftp.config')
-
-        # Check each setting
-        diffs = {}
-        for setting in set(DB_DFLT) & set(db):
-            # Special cases: ftp_anonpath is 'nullable' in the DB, but the default is False
-            if setting == "anonpath" and (db[setting] == '' or db[setting] is None):
-                db[setting] = False
-            # Special cases: Restore 'None' for empty string
-            if setting in ['banner', 'options', 'masqaddress'] and db[setting] == '':
-                db[setting] = None
-
-            if DB_DFLT[setting] != db[setting]:
-                diffs.update({setting: [DB_DFLT[setting], db[setting]]})
-
-        assert len(diffs) == 0, f"Found mismatches: [DB_DFLT, db]\n{diffs}"
-
-
-def test_005_ftp_service_at_boot(request):
-    '''
-    Confirm we can enable FTP service at boot and restore current setting
-    '''
-    # Get the current state and set the new state
-    restore_setting = ftp_set_service_enable_state(True)
-    assert restore_setting is False, f"Unexpected service at boot setting: enable={restore_setting}, expected False"
-
-    # Confirm we toggled the setting
-    res = query_ftp_service()['enable']
-    assert res is True, res
-
-    # Restore original setting
-    ftp_set_service_enable_state(restore_setting)
-
-
-def test_010_ftp_service_start(request):
-    '''
-    Confirm we can start the FTP service with the default config
-    Confirm the proftpd.conf file was generated
-    '''
-    # Start FTP service
-    with ftp_server():
-        # Validate the service is running via our API
-        assert query_ftp_service()['state'] == 'RUNNING'
-
-        # Confirm we have /etc/proftpd/proftpd.conf
-        rv_conf = SSH_TEST("ls /etc/proftpd/proftpd.conf", user, password)
-        assert rv_conf['result'], str(rv_conf)
-
-
-def test_015_ftp_configuration(request):
-    '''
-    Confirm config changes get reflected in proftpd.conf
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-
-    with ftp_server():
-        changes = {
-            'clients': 100,
-            'ipconnections': 10,
-            'loginattempt': 100,
-            'banner': 'A banner to remember',
-            'onlylocal': True,
-            'fxp': True
-        }
-        with ftp_configure(changes):
-            validate_proftp_conf()
-
-
-def test_017_ftp_port(request):
-    '''
-    Confirm config changes get reflected in proftpd.conf
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-
-    with ftp_server():
-        assert query_ftp_service()['state'] == 'RUNNING'
-
-        # Confirm FTP is listening on the default port
-        res = SSH_TEST("ss -tlpn", user, password)
-        sslist = res['output'].splitlines()
-        ftp_entry = [line for line in sslist if "ftp" in line]
-        ftpPort = ftp_entry[0].split()[3][2:]
-        assert ftpPort == "21", f"Expected default FTP port, but found {ftpPort}"
-
-        # Test port change
-        changes = {'port': 22222}
-        with ftp_configure(changes):
-            validate_proftp_conf()
-            res = SSH_TEST("ss -tlpn", user, password)
-            sslist = res['output'].splitlines()
-            ftp_entry = [line for line in sslist if "ftp" in line]
-            ftpPort = ftp_entry[0].split()[3][2:]
-            assert ftpPort == "22222", f"Expected '22222' FTP port, but found {ftpPort}"
-
-
-@pytest.mark.parametrize('NumFailedTries,expect_to_pass', [
-    (2, True),
-    (3, False)
-])
-def test_020_login_attempts(request, NumFailedTries, expect_to_pass):
-    '''
-    Test our ability to change and trap excessive failed login attempts
-    1) Test good password before running out of tries
-    2) Test good password after running out of tries
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    login_setup = {
-        "onlylocal": True,
-        "loginattempt": 3,
-    }
-    with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPfatfingeruser', login_setup) as loginftp:
-        MaxTries = loginftp.ftpConf['loginattempt']
-        ftpObj = loginftp.ftp
-        for login_attempt in range(0, NumFailedTries):
-            try:
-                # Attempt login with bad password
-                ftpObj.login(user='FTPfatfingeruser', passwd="secrfet")
-            except all_errors:
-                # A login failure with the deliberately bad password is expected
-                pass
-            except EOFError:
-                # The server may drop the connection after repeated failures
-                pass
-        if expect_to_pass:
-            # Try with correct password
-            ftpObj.login(user='FTPfatfingeruser', passwd="secret")
-            assert expect_to_pass is True
-        else:
-            with pytest.raises(Exception):
-                # Try with correct password, but already exceeded number of tries
-                ftpObj.login(user='FTPfatfingeruser', passwd="secret")
-                assert login_attempt < MaxTries, "Failed to limit login attempts"
-
-
-def test_030_root_login(request):
-    '''
-    "Allow Root Login" setting has been removed.
-    Confirm we block root login.
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    with ftp_anon_ds_and_srvr_conn('anonftpDS') as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            res = ftpObj.login(user, password)
-            assert False, f"Unexpected behavior: root login was supposed to fail, but login response is {res}"
-
-        except all_errors:
-            pass
-
-
-@pytest.mark.parametrize('setting,ftpConfig', [
-    (True, {"onlyanonymous": True, "anonpath": "anonftpDS", "onlylocal": False}),
-    (False, {"onlyanonymous": False, "anonpath": "", "onlylocal": True}),
-])
-def test_031_anon_login(request, setting, ftpConfig):
-    '''
-    Test the WebUI "Allow Anonymous Login" setting.
-    In our DB the setting is "onlyanonymous" and an "Anonymous" section in proftpd.conf.
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    if setting is True:
-        # Fixup anonpath
-        ftpConfig['anonpath'] = f"/mnt/{pool_name}/{ftpConfig['anonpath']}"
-    with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConfig) as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            res = ftpObj.login()
-            assert setting is True, \
-                f"Unexpected behavior: onlyanonymous={ftpConfig['onlyanonymous']}, but login successful: {res}"
-
-            # The following assumes the login was successful
-            assert res.startswith('230')
-            ftpusers = ftp_get_users()
-            assert 'ftp' == ftpusers[0]['user']
-        except all_errors as e:
-            assert setting is False, f"Unexpected failure, onlyanonymous={setting}, but got {e}"
-
-
-@pytest.mark.parametrize('localuser,expect_to_pass', [
-    ("FTPlocaluser", True),
-    ("BadUser", False)
-])
-def test_032_local_login(request, localuser, expect_to_pass):
-    depends(request, ["init_dflt_config"], scope="session")
-    with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', {"onlylocal": True}) as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            ftpObj.login(localuser, 'secret')
-            assert expect_to_pass, f"Unexpected behavior: {localuser} should not have been allowed to login"
-        except all_errors as e:
-            assert not expect_to_pass, f"Unexpected behavior: {localuser} should have been allowed to login. {e}"
-
-
-def test_040_reverse_dns(request):
-    depends(request, ["init_dflt_config"], scope="session")
-    ftp_conf = {"onlylocal": True, "reversedns": True}
-    with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', ftp_conf) as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            ftpObj.login('FTPlocaluser', 'secret')
-        except all_errors as e:
-            assert False, f"Login failed with reverse DNS enabled. {e}"
-
-
-@pytest.mark.parametrize('masq_type, expect_to_pass',
-                         [("hostname", True), ("ip_addr", True), ("invalid.domain", False)])
-def test_045_masquerade_address(request, masq_type, expect_to_pass):
-    '''
-    TrueNAS tooltip:
-        Public IP address or hostname. Set if FTP clients cannot connect through a NAT device.
-    We test masqaddress with: hostname, IP address and an invalid fqdn.
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    netconfig = call('network.configuration.config')
-    if masq_type == 'hostname':
-        masqaddr = netconfig['hostname']
-        if netconfig['domain'] and netconfig['domain'] != "local":
-            masqaddr = masqaddr + "." + netconfig['domain']
-    elif masq_type == 'ip_addr':
-        masqaddr = truenas_server.ip
-    else:
-        masqaddr = masq_type
-
-    ftp_conf = {"onlylocal": True, "masqaddress": masqaddr}
-    with pytest.raises(Exception) if not expect_to_pass else contextlib.nullcontext():
-        with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', ftp_conf) as ftpdata:
-            ftpObj = ftpdata.ftp
-            try:
-                ftpObj.login('FTPlocaluser', 'secret')
-                res = ftpObj.sendcmd('PASV')
-                assert res.startswith("227 Entering Passive Mode")
-                srvr_ip, p1, p2 = res.split('(', 1)[1].split(')')[0].rsplit(',', 2)
-                srvr_ip = srvr_ip.replace(',', '.')
-                # If the masquerade is our hostname the presented IP address will
-                # be the 'local' IP address
-                if masq_type == "hostname":
-                    assert srvr_ip == '127.0.0.1'
-                else:
-                    assert srvr_ip == truenas_server.ip
-            except all_errors as e:
-                assert False, f"FTP failed with masqaddress = '{masqaddr}'. {e}"
-
-
-@pytest.mark.parametrize('testing,ftpConfig,expect_to_pass', [
-    ("config", {"passiveportsmin": 100}, False),
-    ("config", {"passiveportsmin": 3000, "passiveportsmax": 2000}, False),
-    ("config", {"passiveportsmin": 2000, "passiveportsmax": 2000}, False),
-    ("run", {"passiveportsmin": 22222, "passiveportsmax": 22223}, True),
-])
-def test_050_passive_ports(request, testing, ftpConfig, expect_to_pass):
-    '''
-    Test the passive port range setting.
-    NB: The proFTPd documentation for this setting states:
-        | Should no open ports be found within the configured range, the server will default
-        | to a random kernel-assigned port, and a message logged.
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    if testing == 'config':
-        try:
-            with ftp_configure(ftpConfig):
-                assert expect_to_pass is True
-        except Exception as e:
-            assert expect_to_pass is False, f"{e}"
-    else:
-        with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConfig) as ftpdata:
-            ftpObj = ftpdata.ftp
-            try:
-                res = ftpObj.login()
-                # Confirm the login was successful
-                assert res.startswith('230')
-                res = ftpObj.sendcmd('PASV')
-                assert res.startswith("227 Entering Passive Mode")
-                # The response includes the server IP and passive port
-                # Convert '227 Entering Passive Mode (a,b,c,d,e,f)' to ['a,b,c,d', 'e', 'f']
-                srvr_ip, p1, p2 = res.split('(', 1)[1].split(')')[0].rsplit(',', 2)
-                # Calculate the passive port
-                pasv_port = int(p1) * 256 + int(p2)
-                assert srvr_ip.replace(',', '.') == truenas_server.ip
-                assert pasv_port == ftpdata.ftpConf['passiveportsmin']
-            except all_errors as e:
-                assert expect_to_pass is False, f"Unexpected failure, {e}"
-
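-# For reference, the passive-mode arithmetic used in test_050 above: the server
-# replies '227 Entering Passive Mode (h1,h2,h3,h4,p1,p2)', where the last two
-# fields encode the data port as p1 * 256 + p2. A minimal illustrative sketch
-# (the address and port below are made up, not taken from any test run):
-#
-#   >>> res = '227 Entering Passive Mode (192,168,0,10,86,207)'
-#   >>> ip, p1, p2 = res.split('(', 1)[1].split(')')[0].rsplit(',', 2)
-#   >>> ip.replace(',', '.'), int(p1) * 256 + int(p2)
-#   ('192.168.0.10', 22223)
-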
-
-def test_055_no_activity_timeout(request):
-    '''
-    Test the WebUI "Timeout" setting.  In our DB it is "timeout" and "TimeoutIdle" in proftpd.conf.
-        | The TimeoutIdle directive configures the maximum number of seconds that proftpd will
-        | allow clients to stay connected without receiving any data on either the control or data connection
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    with ftp_anon_ds_and_srvr_conn('anonftpDS', {'timeout': 3}) as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            ftpObj.login()
-            sleep(ftpdata.ftpConf['timeout'] + 1)
-            ftpObj.nlst()
-            assert False, "Unexpected behavior: 'Activity Timeout' did not occur.  "\
-                          "Expected listing to fail, but it succeeded."
-        except all_errors as e:
-            chkstr = f"Idle timeout ({ftpdata.ftpConf['timeout']} seconds)"
-            assert chkstr in str(e), e
-
-
-def test_056_no_xfer_timeout(request):
-    '''
-    This tests the WebUI "Notransfer Timeout" setting.  In our DB it is "timeout_notransfer"
-    and "TimeoutNoTransfer" in proftpd.conf.
-        | The TimeoutNoTransfer directive configures the maximum number of seconds a client
-        | is allowed to spend connected, after authentication, without issuing a data transfer command
-        | which results in a data connection (i.e. sending/receiving a file, or requesting a directory listing)
-    '''
-    depends(request, ["init_dflt_config"], scope="session")
-    with ftp_anon_ds_and_srvr_conn('anonftpDS', {'timeout_notransfer': 3}) as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            ftpObj.login()
-            sleep(ftpdata.ftpConf['timeout_notransfer'] + 1)
-            ftpObj.nlst()
-            assert False, "Unexpected behavior: 'No Transfer Timeout' did not occur.  "\
-                          "Expected listing to fail, but it succeeded."
-        except all_errors as e:
-            chkstr = f"No transfer timeout ({ftpdata.ftpConf['timeout_notransfer']} seconds)"
-            assert chkstr in str(e), e
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)  # Timing can sometimes land outside the expected range
-@pytest.mark.parametrize('testwho,ftp_setup_func', [
-    ('anon', ftp_anon_ds_and_srvr_conn),
-    ('local', ftp_user_ds_and_srvr_conn),
-])
-def test_060_bandwidth_limiter(request, testwho, ftp_setup_func):
-    FileSize = 1024  # KiB
-    ulRate = 64  # KiB
-    dlRate = 128  # KiB
-    ulConf = testwho + 'userbw'
-    dlConf = testwho + 'userdlbw'
-
-    depends(request, ["init_dflt_config"], scope="session")
-    ftp_anon_bw_limit = {
-        ulConf: ulRate,  # upload limit
-        dlConf: dlRate   # download limit
-    }
-    ftpfname = "BinaryFile"
-
-    with ftp_setup_func(FTPconfig=ftp_anon_bw_limit) as ftpdata:
-        ftpObj = ftpdata.ftp
-        localfname = f"/tmp/{ftpfname}"
-        if testwho == 'anon':
-            results = SSH_TEST(f"chown ftp {ftpdata.ftpConf['anonpath']}", user, password)
-            assert results['result'] is True, results
-        try:
-            if testwho == 'anon':
-                ftpObj.login()
-            else:
-                ftpObj.login('FTPlocal', 'secret')
-            ftpObj.voidcmd('TYPE I')
-
-            # Create local binary file
-            with open(localfname, 'wb') as f:
-                f.write(os.urandom(1024 * FileSize))
-
-            ElapsedTime = int(ftp_upload_binary_file(ftpObj, localfname, ftpfname))
-            xfer_rate = FileSize // ElapsedTime
-            # This typically will match exactly, but in actual testing this might vary
-            assert (ulRate - 8) <= xfer_rate <= (ulRate + 20), \
-                f"Failed upload rate limiter: Expected {ulRate}, but sensed rate is {xfer_rate}"
-
-            ElapsedTime = int(ftp_download_binary_file(ftpObj, ftpfname, localfname))
-            xfer_rate = FileSize // ElapsedTime
-            # Allow for variance
-            assert (dlRate - 8) <= xfer_rate <= (dlRate + 20), \
-                f"Failed download rate limiter: Expected {dlRate}, but sensed rate is {xfer_rate}"
-        except all_errors as e:
-            assert False, f"Unexpected failure: {e}"
-        finally:
-            # Clean up
-            if os.path.exists(localfname):
-                os.remove(localfname)
-
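-# A worked example of the rate check in test_060 above, using the configured
-# values rather than measured ones: uploading FileSize=1024 KiB at the
-# ulRate=64 KiB/s cap should take roughly 1024 / 64 = 16 seconds, so the
-# sensed rate works out to 1024 // 16 = 64 KiB/s; the download leg at
-# dlRate=128 KiB/s takes about 8 seconds, giving 1024 // 8 = 128 KiB/s.
-# The assertions allow a margin around these ideal figures because timing on
-# a loaded test host is never exact.
-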
-
-@pytest.mark.parametrize('fmask,f_expect,dmask,d_expect', [
-    ("000", "0666", "000", "0777"),
-    ("007", "0660", "002", "0775"),
-])
-def test_065_umask(request, fmask, f_expect, dmask, d_expect):
-    depends(request, ["init_dflt_config"], scope="session")
-    localfile = "/tmp/localfile"
-    fname = "filemask" + fmask
-    dname = "dirmask" + dmask
-    ftp_create_local_file(localfile, "Contents of local file")
-
-    ftp_umask = {
-        'filemask': fmask,
-        'dirmask': dmask
-    }
-    with ftp_anon_ds_and_srvr_conn('anonftpDS', ftp_umask, mode='777') as ftpdata:
-        ftpObj = ftpdata.ftp
-        try:
-            ftpObj.login()
-
-            # Add file and make a directory
-            with open(localfile, 'rb') as tmpfile:
-                res = ftpObj.storlines(f'STOR {fname}', tmpfile)
-                assert "Transfer complete" in res
-
-            res = ftpObj.mkd(dname)
-            assert dname in res
-
-            ftpdict = dict(ftpObj.mlsd())
-            assert ftpdict[fname]['unix.mode'] == f_expect, ftpdict[fname]
-            assert ftpdict[dname]['unix.mode'] == d_expect, ftpdict[dname]
-
-        except all_errors as e:
-            assert False, f"Unexpected failure: {e}"
-        finally:
-            # Clean up
-            if os.path.exists(localfile):
-                os.remove(localfile)
-
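-# The expected modes in the test_065 parametrization follow directly from the
-# masks: each mask bit clears the matching permission bit of the default mode
-# (0666 for files, 0777 for directories). A small sketch of that arithmetic,
-# using the same values as the parametrization above:
-#
-#   >>> oct(0o666 & ~0o007)   # filemask 007 -> expected file mode
-#   '0o660'
-#   >>> oct(0o777 & ~0o002)   # dirmask 002 -> expected directory mode
-#   '0o775'
-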
-
-@pytest.mark.dependency(depends=['init_dflt_config'])
-@pytest.mark.parametrize(
-    'ftpConf,expect_to_pass', [
-        ({}, False),
-        ({'resume': True}, True)
-    ],
-    ids=[
-        "resume xfer: blocked",
-        "resume xfer: allowed"
-    ]
-)
-@pytest.mark.parametrize(
-    'direction,create_src,xfer_partial,xfer_remainder', [
-        ('upload', ftp_create_local_file, upload_partial, ftp_upload_binary_file),
-        ('download', ftp_create_remote_file, download_partial, ftp_download_binary_file)
-    ],
-    ids=[
-        "upload",
-        "download"
-    ]
-)
-def test_070_resume_xfer(
-    ftpConf, expect_to_pass, direction, create_src, xfer_partial, xfer_remainder
-):
-
-    # ---------- helper functions ----------
-    def get_tgt_size(ftp, tgt, direction):
-        if direction == 'upload':
-            ftp.voidcmd('TYPE I')
-            return ftp.size(os.path.basename(tgt))
-        else:
-            return os.path.getsize(tgt)
-
-    def get_tgt_chksum(tgt, direction):
-        if direction == 'upload':
-            return ssh(f"sha256sum {tgt}").split()[0]
-        else:
-            res = subprocess.run(["sha256sum", tgt], capture_output=True)
-            assert res.returncode == 0
-            return res.stdout.decode().split()[0]
-
-    try:
-        # Run test
-        with ftp_anon_ds_and_srvr_conn('anonftpDS', ftpConf, withConn=False, mode='777') as ftpdata:
-            src_path = {'upload': "/tmp", 'download': f"{ftpdata.ftpConf['anonpath']}"}
-            tgt_path = {'upload': f"{ftpdata.ftpConf['anonpath']}", "download": "/tmp"}
-
-            # xfer test
-            try:
-                # Create a 1MB source binary file.
-                src_pathname = '/'.join([src_path[direction], 'srcfile'])
-                tgt_pathname = '/'.join([tgt_path[direction], 'tgtfile'])
-                src_size, src_chksum = create_src(src_pathname, 1024)
-
-                ftpObj = ftp_connect(truenas_server.ip)
-                ftpObj.login()
-                xfer_partial(ftpObj, src_pathname, tgt_pathname, 768)
-
-                # Quit to simulate loss of connection
-                try:
-                    ftpObj.quit()
-                except error_temp:
-                    # May generate a quit error that we ignore for this test
-                    pass
-                ftpObj = None
-                sleep(1)
-
-                # Attempt resume to complete the upload
-                ftpObj = ftp_connect(truenas_server.ip)
-                ftpObj.login()
-                xfer_remainder(ftpObj, src_pathname, tgt_pathname, get_tgt_size(ftpObj, tgt_pathname, direction))
-            except all_errors as e:
-                assert not expect_to_pass, f"Unexpected failure in resumed {direction} test: {e}"
-                if not expect_to_pass:
-                    assert "Restart not permitted" in str(e), str(e)
-
-            if expect_to_pass:
-                # Check upload result
-                tgt_size = get_tgt_size(ftpObj, tgt_pathname, direction)
-                assert int(tgt_size) == int(src_size), \
-                    f"Failed {direction} size test. Expected {src_size}, found {tgt_size}"
-                tgt_chksum = get_tgt_chksum(tgt_pathname, direction)
-                assert src_chksum == tgt_chksum, \
-                    f"Failed {direction} checksum test. Expected {src_chksum}, found {tgt_chksum}"
-
-    finally:
-        try:
-            for file in ('/tmp/srcfile', '/tmp/tgtfile'):
-                os.remove(file)
-        except OSError:
-            pass
-
-
-class UserTests:
-    """
-    Run the same suite of tests for all users
-    """
-    ftp_user_tests = [
-        (ftp_dir_listing_test, []),
-        (ftp_ipconnections_test, []),
-        (ftp_download_files_test, [
-            {'name': 'init_file', 'contents': "Contents of init_file", 'expect_to_pass': True},
-            {'name': 'init_ro_file', 'contents': "RO data", 'expect_to_pass': True},
-        ]),
-        (ftp_upload_files_test, [
-            {'name': 'DeleteMeFile', 'content': 'To be deleted', 'expect_to_pass': True},
-            {'name': 'init_ro_file', 'expect_to_pass': False},
-        ]),
-        (ftp_delete_files_test, [
-            {'name': 'DeleteMeFile', 'expect_to_pass': True},
-            {'name': 'bogus_file', 'expect_to_pass': False},
-            {'name': 'init_ro_dir/ReadOnlyDir_file1', 'expect_to_pass': False},
-        ]),
-        (ftp_add_dirs_test, [
-            {'name': 'DeleteMeDir', 'expect_to_pass': True},
-        ]),
-        (ftp_remove_dirs_test, [
-            {'name': 'DeleteMeDir', 'expect_to_pass': True},
-            {'name': 'bogus_dir', 'expect_to_pass': False},
-            {'name': 'init_ro_dir', 'expect_to_pass': False},
-        ])
-    ]
-
-    @pytest.mark.parametrize("user_test,run_data", ftp_user_tests)
-    def test_080_ftp_user(self, setup, user_test, run_data):
-        try:
-            user_test(setup, run_data)
-        except all_errors as e:
-            assert e is None, f"FTP error: {e}"
-
-
-class TestAnonUser(UserTests):
-    """
-    Create a dataset with some data to be used for anonymous FTP
-    Start FTP server configured for anonymous
-    Create an anonymous FTP connection and login
-    """
-    @pytest.fixture(scope='class')
-    def setup(self, request):
-        depends(request, ["init_dflt_config"], scope="session")
-
-        with ftp_anon_ds_and_srvr_conn('anonftpDS') as anonftp:
-            # Make the directory owned by the anonymous ftp user
-            anon_path = anonftp.dirs_and_files['path']
-            results = SSH_TEST(f"chown ftp {anon_path}", user, password)
-            assert results['result'] is True, results
-            login_error = None
-            ftpObj = anonftp.ftp
-            try:
-                res = ftpObj.login()
-                assert res.startswith('230 Anonymous access granted')
-                # anonymous clients should not get the welcome message
-                assert anonftp.motd.splitlines()[0] not in res
-
-                # Run anonymous user tests with updated data
-                yield init_test_data('Anon', anonftp)
-            except all_errors as e:
-                login_error = e
-            assert login_error is None
-
-
-class TestLocalUser(UserTests):
-
-    @pytest.fixture(scope='class')
-    def setup(self, request):
-        depends(request, ["init_dflt_config"], scope="session")
-
-        local_setup = {
-            "onlylocal": True,
-        }
-        with ftp_user_ds_and_srvr_conn('ftplocalDS', 'FTPlocaluser', local_setup) as localftp:
-            login_error = None
-            ftpObj = localftp.ftp
-            try:
-                res = ftpObj.login(user='FTPlocaluser', passwd="secret")
-                assert res.startswith('230')
-                # local users should get the welcome message
-                assert localftp.motd.splitlines()[0] in res
-                ftpusers = ftp_get_users()
-                assert "FTPlocaluser" == ftpusers[0]['user']
-
-                # Run the user tests with updated data
-                yield init_test_data('Local', localftp)
-            except all_errors as e:
-                login_error = e
-            assert login_error is None
-
-
-class TestFTPSUser(UserTests):
-
-    @pytest.fixture(scope='class')
-    def setup(self, request):
-        depends(request, ["init_dflt_config"], scope="session")
-
-        # We include tls_opt_no_session_reuse_required because python
-        # ftplib has a long running issue with support for it.
-        tls_setup = {
-            "tls": True,
-            "tls_opt_no_session_reuse_required": True,
-            "ssltls_certificate": 1
-        }
-        with ftp_user_ds_and_srvr_conn('ftpslocalDS', 'FTPSlocaluser', tls_setup, useFTPS=True) as tlsftp:
-            ftpsObj = tlsftp.ftp
-            login_error = None
-            try:
-                res = ftpsObj.login(user='FTPSlocaluser', passwd="secret")
-                assert res.startswith('230')
-                # local users should get the welcome message
-                assert tlsftp.motd.splitlines()[0] in res
-                ftpusers = ftp_get_users()
-                assert "FTPSlocaluser" == ftpusers[0]['user']
-
-                # Run the user tests with updated data
-                yield init_test_data('FTPS', tlsftp)
-            except all_errors as e:
-                login_error = e
-            assert login_error is None
-
-
-@pytest.mark.skip(reason="Enable this when Jenkins infrastructure is better able to handle this test")
-def test_085_ftp_service_starts_after_reboot():
-    '''
-    NAS-123024
-    There is a bug in the Debian Bookworm proftpd install package
-    that enables proftpd.socket which blocks proftpd.service from starting.
-
-    We fixed this by disabling proftpd.socket. There is a different fix
-    in a Bookworm update that involves refactoring the systemd unit files.
-    '''
-    with ftp_server(True):  # start ftp and configure it to start at boot
-        rv = query_ftp_service()
-        assert rv['state'] == 'RUNNING'
-        assert rv['enable'] is True
-
-        reboot(truenas_server.ip)
-
-        # wait for box to reboot
-        max_wait = 60
-        ftp_state = None
-        for retry in range(max_wait):
-            try:
-                ftp_state = query_ftp_service()
-                break
-            except Exception:
-                sleep(1)
-                continue
-
-        # make sure ftp service started after boot
-        assert ftp_state, f'Failed to query ftp service state after {max_wait!r} seconds'
-        assert ftp_state['state'] == 'RUNNING', f'Expected ftp service to be running, found {ftp_state["state"]!r}'
-
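-# A hypothetical helper (not part of this suite) for the NAS-123024 scenario in
-# test_085 above: confirm that systemd's proftpd.socket unit stays disabled so
-# socket activation cannot block proftpd.service from starting. The check=False
-# and complete_response=True arguments are assumptions about the ssh() helper's
-# signature; 'systemctl is-enabled' exits non-zero for disabled units.
-#
-# def proftpd_socket_is_disabled():
-#     res = ssh('systemctl is-enabled proftpd.socket', check=False, complete_response=True)
-#     return res['stdout'].strip() in ('disabled', 'masked')
-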
-
-def test_100_ftp_service_stop():
-    call('service.stop', 'ftp', {'silent': False})
-    rv = query_ftp_service()
-    assert rv['state'] == 'STOPPED'
-    assert rv['enable'] is False
diff --git a/tests/api2/test_260_iscsi.py b/tests/api2/test_260_iscsi.py
deleted file mode 100644
index e21806f0b6020..0000000000000
--- a/tests/api2/test_260_iscsi.py
+++ /dev/null
@@ -1,383 +0,0 @@
-import random
-import string
-from time import sleep
-
-import pytest
-from assets.websocket.iscsi import initiator, portal, target, target_extent_associate
-from auto_config import hostname, pool_name
-from functions import SSH_TEST
-
-from middlewared.test.integration.assets.iscsi import iscsi_extent
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-try:
-    from config import BSD_HOST, BSD_PASSWORD, BSD_USERNAME
-    have_bsd_host_cfg = True
-except ImportError:
-    have_bsd_host_cfg = False
-
-pytestmark = pytest.mark.skipif(not have_bsd_host_cfg, reason='BSD host configuration is missing in ixautomation.conf')
-
-digit = ''.join(random.choices(string.digits, k=2))
-
-file_mountpoint = f'/tmp/iscsi-file-{hostname}'
-zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}'
-target_name = f"target{digit}"
-basename = "iqn.2005-10.org.freenas.ctl"
-zvol_name = f"ds{digit}"
-zvol = f'{pool_name}/{zvol_name}'
-zvol_url = zvol.replace('/', '%2F')
-
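-# The full target IQN used by the FreeBSD initiator below is built from these
-# pieces as f'{basename}:{target_name}', e.g. 'iqn.2005-10.org.freenas.ctl:target42'
-# (the two-digit suffix is random, so the example value is only illustrative).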
-
-def has_session_present(target):
-    results = call('iscsi.global.sessions', [['target', '=', target]])
-    assert isinstance(results, list), results
-    return bool(len(results))
-
-
-def waiting_for_iscsi_to_disconnect(base_target, wait):
-    timeout = 0
-    # First check that the client no longer sees the target logged in
-    while timeout < wait:
-        cmd = 'iscsictl -L'
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        if base_target not in results['output']:
-            break
-        timeout += 1
-        sleep(1)
-    # Next check that the SCALE does not see a session to the target
-    while timeout < wait:
-        if not has_session_present(base_target):
-            return True
-        timeout += 1
-        sleep(1)
-    return False
-
-
-def wait_for_iscsi_connection_before_grabbing_device_name(iqn, wait=60):
-    timeout = 0
-    device_name = ""
-    while timeout < wait:
-        cmd = f'iscsictl -L | grep {iqn}'
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        if results['result'] and "Connected:" in results['output']:
-            device_name = results['stdout'].strip().split()[3]
-            if device_name.startswith('probe'):
-                timeout += 1
-                sleep(1)
-                continue
-            assert True
-            break
-        timeout += 1
-        sleep(1)
-    while timeout < wait:
-        cmd = f'test -e /dev/{device_name}'
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        if results['result']:
-            assert True
-            break
-        timeout += 1
-        sleep(1)
-    assert timeout < wait, f"Timed out waiting {wait} seconds for {iqn} to surface"
-    return device_name
-
-
-@pytest.fixture(scope='module')
-def fix_initiator():
-    with initiator() as config:
-        yield config
-
-
-@pytest.fixture(scope='module')
-def fix_portal():
-    with portal() as config:
-        yield {'portal': config}
-
-
-@pytest.fixture(scope='module')
-def fix_iscsi_enabled():
-    payload = {"enable": True}
-    config = call('service.update', 'iscsitarget', payload)
-    try:
-        yield config
-    finally:
-        payload = {"enable": False}
-        config = call('service.update', 'iscsitarget', payload)
-
-
-@pytest.fixture(scope='module')
-def fix_iscsi_started(fix_iscsi_enabled):
-    call('service.start', 'iscsitarget')
-    sleep(1)
-    try:
-        yield
-    finally:
-        call('service.stop', 'iscsitarget')
-
-
-def test_add_iscsi_initiator(fix_initiator):
-    result = call('iscsi.initiator.query')
-    assert len(result) == 1, result
-    assert result[0]['comment'] == 'Default initiator', result
-
-
-def test_add_iscsi_portal(fix_portal):
-    result = call('iscsi.portal.query')
-    assert len(result) == 1, result
-    assert result[0]['listen'][0]['ip'] == '0.0.0.0', result
-
-
-def test_enable_iscsi_service(fix_iscsi_enabled):
-    pass
-
-
-def test_start_iscsi_service(fix_iscsi_started):
-    result = call('service.query', [['service', '=', 'iscsitarget']], {'get': True})
-    assert result["state"] == "RUNNING", result
-
-
-class FileExtent:
-
-    @pytest.fixture(scope='class')
-    def fix_extent(self):
-        filepath = f'/mnt/{pool_name}/iscsi_file_extent'
-        data = {
-            'type': 'FILE',
-            'name': 'extent',
-            'filesize': 536870912,
-            'path': filepath
-        }
-        try:
-            with iscsi_extent(data) as config:
-                yield config
-        finally:
-            ssh(f'rm -f {filepath}')
-
-
-class ZvolExtent:
-
-    @pytest.fixture(scope='class')
-    def fix_extent(self):
-        zvol_data = {
-            'type': 'VOLUME',
-            'volsize': 655360,
-            'volblocksize': '16K'
-        }
-        with dataset(zvol_name, zvol_data, pool_name):
-            extent_data = {
-                'type': 'DISK',
-                'disk': f'zvol/{zvol}',
-                'name': 'zvol_extent',
-            }
-            with iscsi_extent(extent_data) as config:
-                yield config
-
-
-class Target:
-
-    @pytest.fixture(scope='class')
-    def fix_target(self, fix_portal):
-        result = {}
-        result.update(fix_portal)
-        with target(self.TARGET_NAME, [{'portal': fix_portal['portal']['id']}]) as config:
-            result.update({'target': config})
-            result.update({'iqn': f'{basename}:{self.TARGET_NAME}'})
-            yield result
-
-    @pytest.fixture(scope='class')
-    def fix_targetextent(self, fix_target, fix_extent):
-        result = {}
-        result.update(fix_target)
-        result.update(fix_extent)
-        with target_extent_associate(fix_target['target']['id'], fix_extent['id'], 1) as config:
-            result.update({'targetextent': config})
-            yield result
-
-    def test_add_iscsi_target(self, fix_target):
-        result = call('iscsi.target.query', [['name', '=', fix_target['target']['name']]])
-        assert len(result) == 1, result
-
-    def test_add_iscsi_file_extent(self, fix_extent):
-        result = call('iscsi.extent.query')
-        assert len(result) == 1, result
-
-    def test_associate_iscsi_target(self, fix_targetextent):
-        result = call('iscsi.targetextent.query')
-        assert len(result) == 1, result
-
-
-class LoggedInTarget:
-
-    @pytest.fixture(scope='class')
-    def fix_connect_to_target(self, fix_iscsi_started, fix_targetextent):
-        iqn = fix_targetextent['iqn']
-        cmd = f'iscsictl -A -p {truenas_server.ip}:3260 -t {iqn}'
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-        try:
-            yield fix_targetextent
-        finally:
-            cmd = f'iscsictl -R -t {iqn}'
-            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-            assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-            # Currently FreeBSD (13.1-RELEASE-p5) does *not* issue a LOGOUT (verified by
-            # network capture), so give the target time to react. SCST will log an error, e.g.
-            # iscsi-scst: ***ERROR***: Connection 00000000e749085f with initiator iqn.1994-09.org.freebsd:freebsd13.local unexpectedly closed!
-            assert waiting_for_iscsi_to_disconnect(f'{iqn}', 30)
-
-    @pytest.fixture(scope='class')
-    def fix_target_surfaced(self, fix_connect_to_target):
-        result = {}
-        result.update(fix_connect_to_target)
-        iqn = fix_connect_to_target['iqn']
-        device_name = wait_for_iscsi_connection_before_grabbing_device_name(iqn)
-        assert device_name != ""
-        result.update({'device': device_name})
-        yield result
-
-    def test_connect_to_iscsi_target(self, fix_connect_to_target):
-        pass
-
-    def test_target_surfaced(self, fix_target_surfaced):
-        pass
-
-
-class Formatted:
-    @pytest.fixture(scope='class')
-    def fix_format_target_volume(self, fix_target_surfaced):
-        device_name = fix_target_surfaced['device']
-        cmd = f'umount "/media/{device_name}"'
-        SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        cmd2 = f'newfs "/dev/{device_name}"'
-        results = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-        yield fix_target_surfaced
-
-    def test_format_target_volume(self, fix_format_target_volume):
-        pass
-
-
-class Mounted:
-    @pytest.fixture(scope='class')
-    def fix_create_iscsi_mountpoint(self):
-        cmd = f'mkdir -p {self.MOUNTPOINT}'
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-        try:
-            yield
-        finally:
-            cmd = f'rm -rf "{self.MOUNTPOINT}"'
-            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-            assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-    @pytest.fixture(scope='class')
-    def fix_mount_target_volume(self, fix_target_surfaced, fix_create_iscsi_mountpoint):
-        device_name = fix_target_surfaced['device']
-        cmd = f'mount "/dev/{device_name}" "{self.MOUNTPOINT}"'
-        # Allow some settle time (if we've just logged in a previously formatted target)
-        sleep(5)
-        results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-        assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-        try:
-            result = {}
-            result.update(fix_target_surfaced)
-            result.update({'mountpoint': self.MOUNTPOINT})
-            yield result
-        finally:
-            cmd = f'umount "{self.MOUNTPOINT}"'
-            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-            assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-    def test_create_iscsi_mountpoint(self, fix_create_iscsi_mountpoint):
-        pass
-
-    def test_mount_target_volume(self, fix_mount_target_volume):
-        pass
-
-
-class TestFileTarget(FileExtent, Target):
-    TARGET_NAME = target_name
-
-    class TestLoggedIn(LoggedInTarget):
-        pass
-
-        class TestFormatted(Formatted):
-            pass
-
-            class TestMounted(Mounted):
-                MOUNTPOINT = file_mountpoint
-
-                def test_create_file(self, fix_mount_target_volume):
-                    cmd = 'touch "%s/testfile"' % self.MOUNTPOINT
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-                def test_move_file(self, fix_mount_target_volume):
-                    cmd = 'mv "%s/testfile" "%s/testfile2"' % (self.MOUNTPOINT, self.MOUNTPOINT)
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-                def test_copy_file(self, fix_mount_target_volume):
-                    cmd = 'cp "%s/testfile2" "%s/testfile"' % (self.MOUNTPOINT, self.MOUNTPOINT)
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-                def test_delete_file(self, fix_mount_target_volume):
-                    results = SSH_TEST('rm "%s/testfile2"' % self.MOUNTPOINT,
-                                       BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-
-class TestZvolTarget(ZvolExtent, Target):
-    TARGET_NAME = zvol_name
-
-    class TestLoggedIn(LoggedInTarget):
-        pass
-
-        class TestFormatted(Formatted):
-            pass
-
-            class TestMounted(Mounted):
-                MOUNTPOINT = zvol_mountpoint
-
-                def test_create_file(self, fix_mount_target_volume):
-                    cmd = 'touch "%s/myfile.txt"' % self.MOUNTPOINT
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-                def test_move_file(self, fix_mount_target_volume):
-                    cmd = 'mv "%s/myfile.txt" "%s/newfile.txt"' % (self.MOUNTPOINT, self.MOUNTPOINT)
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'] is True, f"{results['output']}, {results['stderr']}"
-
-                def test_create_directory_in_zvol_iscsi_share(self, fix_mount_target_volume):
-                    cmd = f'mkdir "{self.MOUNTPOINT}/mydir"'
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'], f"{results['output']}, {results['stderr']}"
-
-                def test_copy_file_to_new_dir_in_zvol_iscsi_share(self, fix_mount_target_volume):
-                    cmd = f'cp "{self.MOUNTPOINT}/newfile.txt" "{self.MOUNTPOINT}/mydir/myfile.txt"'
-                    results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                    assert results['result'], f"{results['output']}, {results['stderr']}"
-
-        def test_verify_the_zvol_mountpoint_is_empty(self):
-            cmd = f'test -f {zvol_mountpoint}/newfile.txt'
-            results = SSH_TEST(cmd, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-            assert not results['result'], f"{results['output']}, {results['stderr']}"
-
-    class TestLoggedInAgain(LoggedInTarget):
-        pass
-
-        class TestMounted(Mounted):
-            MOUNTPOINT = zvol_mountpoint
-
-            def test_verify_files_and_directory_kept_on_the_zvol_iscsi_share(self):
-                cmd1 = f'test -f {zvol_mountpoint}/newfile.txt'
-                results1 = SSH_TEST(cmd1, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                assert results1['result'], results1['output']
-                cmd2 = f'test -f "{zvol_mountpoint}/mydir/myfile.txt"'
-                results2 = SSH_TEST(cmd2, BSD_USERNAME, BSD_PASSWORD, BSD_HOST)
-                assert results2['result'], results2['output']
diff --git a/tests/api2/test_261_iscsi_cmd.py b/tests/api2/test_261_iscsi_cmd.py
deleted file mode 100644
index 88748ce827ccc..0000000000000
--- a/tests/api2/test_261_iscsi_cmd.py
+++ /dev/null
@@ -1,2771 +0,0 @@
-import contextlib
-import enum
-import errno
-import ipaddress
-import os
-import random
-import socket
-import string
-from time import sleep
-
-import iscsi
-import pyscsi
-import pytest
-import requests
-from assets.websocket.iscsi import (alua_enabled, initiator, initiator_portal,
-                                    portal, read_capacity16, target,
-                                    target_extent_associate, verify_capacity,
-                                    verify_luns, verify_ha_inquiry, verify_ha_device_identification, TUR)
-from middlewared.service_exception import InstanceNotFound, ValidationError, ValidationErrors
-from middlewared.test.integration.assets.iscsi import target_login_test
-from middlewared.test.integration.assets.pool import dataset, snapshot
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from pyscsi.pyscsi.scsi_sense import sense_ascq_dict
-from pytest_dependency import depends
-
-from auto_config import ha, hostname, isns_ip, password, pool_name, user
-from functions import SSH_TEST
-from protocols import (initiator_name_supported, iscsi_scsi_connection,
-                       isns_connection)
-
-# Set up some flags that will enable/disable tests based upon the capabilities of the
-# python-scsi package in use
-try:
-    from pyscsi.pyscsi.scsi_cdb_persistentreservein import PR_SCOPE, PR_TYPE
-    pyscsi_has_persistent_reservations = 'PersistentReserveOut' in dir(pyscsi.pyscsi.scsi)
-    LU_SCOPE = PR_SCOPE.LU_SCOPE
-except ImportError:
-    pyscsi_has_persistent_reservations = False
-    LU_SCOPE = 0
-skip_persistent_reservations = pytest.mark.skipif(not pyscsi_has_persistent_reservations,
-                                                  reason="PYSCSI does not support persistent reservations")
-
-skip_multi_initiator = pytest.mark.skipif(not initiator_name_supported(),
-                                          reason="Cannot specify distinct initiator names")
-
-skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests")
-
-
-skip_invalid_initiatorname = pytest.mark.skipif(not initiator_name_supported(),
-                                                reason="Invalid initiatorname will be presented")
-
-pyscsi_has_report_target_port_groups = 'ReportTargetPortGroups' in dir(pyscsi.pyscsi.scsi)
-
-# See: https://github.com/python-scsi/cython-iscsi/pull/8
-pyscsi_supports_check_condition = hasattr(iscsi.Task, 'raw_sense')
-skip_no_check_condition = pytest.mark.skipif(not pyscsi_supports_check_condition, reason="PYSCSI does not support CHECK CONDITION")
-
-
-# The following strings are taken from pyscsi/pyscsi/scsi_exception
-class CheckType(enum.Enum):
-    CHECK_CONDITION = "CheckCondition"
-    CONDITIONS_MET = "ConditionsMet"
-    BUSY_STATUS = "BusyStatus"
-    RESERVATION_CONFLICT = "ReservationConflict"
-    TASK_SET_FULL = "TaskSetFull"
-    ACA_ACTIVE = "ACAActive"
-    TASK_ABORTED = "TaskAborted"
-
-    def __str__(self):
-        return self.value
-
-
-# Some constants
-MB = 1024 * 1024
-MB_100 = 100 * MB
-MB_200 = 200 * MB
-MB_256 = 256 * MB
-MB_512 = 512 * MB
-PR_KEY1 = 0xABCDEFAABBCCDDEE
-PR_KEY2 = 0x00000000DEADBEEF
-CONTROLLER_A_TARGET_PORT_GROUP_ID = 101
-CONTROLLER_B_TARGET_PORT_GROUP_ID = 102
-
-# Some variables
-digit = ''.join(random.choices(string.digits, k=2))
-file_mountpoint = f'/tmp/iscsi-file-{hostname}'
-zvol_mountpoint = f'/tmp/iscsi-zvol-{hostname}'
-target_name = f"target{digit}"
-dataset_name = f"iscsids{digit}"
-file_name = f"iscsi{digit}"
-basename = "iqn.2005-10.org.freenas.ctl"
-zvol_name = f"ds{digit}"
-zvol = f'{pool_name}/{zvol_name}'
-
-
-def snapshot_rollback(snapshot_id):
-    call('zfs.snapshot.rollback', snapshot_id)
-
-
-def other_node(node):
-    if node == 'A':
-        return 'B'
-    if node == 'B':
-        return 'A'
-    raise ValueError("Invalid node supplied")
-
-
-def get_ip_addr(ip):
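-    # Accept either a literal IP address or a hostname; return a validated
-    # IP address string in both cases.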
-    try:
-        ipaddress.ip_address(ip)
-        return ip
-    except ValueError:
-        actual_ip = socket.gethostbyname(ip)
-        ipaddress.ip_address(actual_ip)
-        return actual_ip
-
-
-@contextlib.contextmanager
-def iscsi_auth(tag, user, secret, peeruser=None, peersecret=None, discovery_auth=None):
-    payload = {
-        'tag': tag,
-        'user': user,
-        'secret': secret,
-    }
-    if peeruser and peersecret:
-        payload.update({
-            'peeruser': peeruser,
-            'peersecret': peersecret
-        })
-    if discovery_auth:
-        payload.update({
-            'discovery_auth': discovery_auth
-        })
-    auth_config = call('iscsi.auth.create', payload)
-
-    try:
-        yield auth_config
-    finally:
-        call('iscsi.auth.delete', auth_config['id'])
-
-
-@contextlib.contextmanager
-def file_extent(pool_name, dataset_name, file_name, filesize=MB_512, extent_name='extent', serial=None):
-    payload = {
-        'type': 'FILE',
-        'name': extent_name,
-        'filesize': filesize,
-        'path': f'/mnt/{pool_name}/{dataset_name}/{file_name}'
-    }
-    # We want to allow any non-None serial to be specified (even '')
-    if serial is not None:
-        payload.update({'serial': serial})
-    extent_config = call('iscsi.extent.create', payload)
-
-    try:
-        yield extent_config
-    finally:
-        call('iscsi.extent.delete', extent_config['id'], True, True)
-
-
-@contextlib.contextmanager
-def zvol_dataset(zvol, volsize=MB_512, recursive=False, force=False):
-    payload = {
-        'name': zvol,
-        'type': 'VOLUME',
-        'volsize': volsize,
-        'volblocksize': '16K'
-    }
-    dataset_config = call('pool.dataset.create', payload)
-
-    try:
-        yield dataset_config
-    finally:
-        try:
-            call('pool.dataset.delete', dataset_config['id'], {'recursive': recursive, 'force': force})
-        except InstanceNotFound:
-            pass
-
-
-def modify_extent(ident, payload):
-    call('iscsi.extent.update', ident, payload)
-
-
-def file_extent_resize(ident, filesize):
-    payload = {
-        'filesize': filesize,
-    }
-    modify_extent(ident, payload)
-
-
-def extent_disable(ident):
-    modify_extent(ident, {'enabled': False})
-
-
-def extent_enable(ident):
-    modify_extent(ident, {'enabled': True})
-
-
-def zvol_resize(zvol, volsize):
-    payload = {
-        'volsize': volsize,
-    }
-    call('pool.dataset.update', zvol, payload)
-
-
-def _get_iscsi_sessions(filters=None):
-    if filters:
-        return call('iscsi.global.sessions', filters)
-    else:
-        return call('iscsi.global.sessions')
-
-
-def get_iscsi_sessions(filters=None, check_length=None):
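-    # When check_length is an int, poll for up to ~10 seconds until the session
-    # count matches (asserting if it never does) so logins/logouts can settle.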
-    if isinstance(check_length, int):
-        for _ in range(10):
-            data = _get_iscsi_sessions(filters)
-            if len(data) == check_length:
-                return data
-            sleep(1)
-        assert len(data) == check_length, data
-    else:
-        data = _get_iscsi_sessions(filters)
-    return data
-
-
-def get_client_count():
-    return call('iscsi.global.client_count')
-
-
-def get_volthreading(zvolid):
-    return call('zfs.dataset.query', [['id', '=', zvolid]], {'get': True})['properties']['volthreading']['value']
-
-
-def verify_client_count(count, retries=10):
-    """Verify that the client count is the expected value, but include some
-    retries to allow things to settle if necessary."""
-    assert retries > 0
-    while retries:
-        if get_client_count() == count:
-            # All is good
-            return
-        retries -= 1
-        sleep(1)
-    assert get_client_count() == count
-
-
-@contextlib.contextmanager
-def zvol_extent(zvol, extent_name='zvol_extent'):
-    payload = {
-        'type': 'DISK',
-        'disk': f'zvol/{zvol}',
-        'name': extent_name,
-    }
-    extent_config = call('iscsi.extent.create', payload)
-
-    try:
-        yield extent_config
-    finally:
-        try:
-            call('iscsi.extent.delete', extent_config['id'], True, True)
-        except InstanceNotFound:
-            pass
-
-
-@contextlib.contextmanager
-def configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name, alias=None, filesize=MB_512, extent_name='extent'):
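-    # Create a target on the supplied portal, a dataset, a file extent inside it
-    # and the target/extent association, then yield a copy of `config` extended
-    # with the created target, dataset and extent.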
-    portal_id = config['portal']['id']
-    with target(target_name, [{'portal': portal_id}], alias) as target_config:
-        target_id = target_config['id']
-        with dataset(dataset_name) as dataset_config:
-            with file_extent(pool_name, dataset_name, file_name, filesize=filesize, extent_name=extent_name) as extent_config:
-                extent_id = extent_config['id']
-                with target_extent_associate(target_id, extent_id):
-                    newconfig = config.copy()
-                    newconfig.update({
-                        'target': target_config,
-                        'dataset': dataset_config,
-                        'extent': extent_config,
-                    })
-                    yield newconfig
-
-
-@contextlib.contextmanager
-def add_file_extent_target_lun(config, lun, filesize=MB_512, extent_name=None):
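-    # Attach an extra file-backed extent to the existing target at the given LUN
-    # and yield a copy of `config` with the new extent recorded under
-    # f'extent_lun{lun}'.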
-    name = config['target']['name']
-    target_id = config['target']['id']
-    dataset_name = f"iscsids{name}"
-    lun_file_name = f'{name}_lun{lun}'
-    if not extent_name:
-        extent_name = lun_file_name
-    with file_extent(pool_name, dataset_name, lun_file_name, filesize=filesize, extent_name=extent_name) as extent_config:
-        extent_id = extent_config['id']
-        with target_extent_associate(target_id, extent_id, lun):
-            newconfig = config.copy()
-            newconfig.update({
-                f'extent_lun{lun}': extent_config,
-            })
-            yield newconfig
-
-
-@contextlib.contextmanager
-def configured_target_to_zvol_extent(config, target_name, zvol, alias=None, extent_name='zvol_extent', volsize=MB_512):
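-    # As above, but backed by a zvol (DISK extent); the yielded config also
-    # records the target/extent association and stores the dataset id.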
-    portal_id = config['portal']['id']
-    with target(target_name, [{'portal': portal_id}], alias) as target_config:
-        target_id = target_config['id']
-        with zvol_dataset(zvol, volsize) as dataset_config:
-            with zvol_extent(zvol, extent_name=extent_name) as extent_config:
-                extent_id = extent_config['id']
-                with target_extent_associate(target_id, extent_id) as associate_config:
-                    newconfig = config.copy()
-                    newconfig.update({
-                        'associate': associate_config,
-                        'target': target_config,
-                        'dataset': dataset_config['id'],
-                        'extent': extent_config,
-                    })
-                    yield newconfig
-
-
-@contextlib.contextmanager
-def add_zvol_extent_target_lun(config, lun, volsize=MB_512, extent_name=None):
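-    # Zvol counterpart of add_file_extent_target_lun: create a zvol, a DISK
-    # extent and the association at the given LUN, recording them in the
-    # yielded config.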
-    name = config['target']['name']
-    zvol_name = f"ds{name}"
-    zvol = f'{pool_name}/{zvol_name}_lun{lun}'
-    target_id = config['target']['id']
-    lun_file_name = f'{name}_lun{lun}'
-    if not extent_name:
-        extent_name = lun_file_name
-    with zvol_dataset(zvol, volsize) as dataset_config:
-        with zvol_extent(zvol, extent_name=extent_name) as extent_config:
-            extent_id = extent_config['id']
-            with target_extent_associate(target_id, extent_id, lun) as associate_config:
-                newconfig = config.copy()
-                newconfig.update({
-                    f'dataset_lun{lun}': dataset_config,
-                    f'associate_lun{lun}': associate_config,
-                    f'extent_lun{lun}': extent_config,
-                })
-                yield newconfig
-
-
-@contextlib.contextmanager
-def configured_target(config, name, extent_type, alias=None, extent_size=MB_512):
-    assert extent_type in ["FILE", "VOLUME"]
-    if extent_type == "FILE":
-        ds_name = f"iscsids{name}"
-        with configured_target_to_file_extent(config, name, pool_name, ds_name, file_name, alias, extent_size, name) as newconfig:
-            yield newconfig
-    elif extent_type == "VOLUME":
-        zvol_name = f"ds{name}"
-        zvol = f'{pool_name}/{zvol_name}'
-        with configured_target_to_zvol_extent(config, name, zvol, alias, name, extent_size) as newconfig:
-            yield newconfig
-
-
-@contextlib.contextmanager
-def isns_enabled(delay=5):
-    payload = {'isns_servers': [isns_ip]}
-    call('iscsi.global.update', payload)
-    try:
-        yield
-    finally:
-        payload = {'isns_servers': []}
-        call('iscsi.global.update', payload)
-        if delay:
-            print(f'Sleeping for {delay} seconds after turning off iSNS')
-            sleep(delay)
-
-
-def expect_check_condition(s, text=None, check_type=CheckType.CHECK_CONDITION):
-    """
-    Expect a CHECK CONDITION containing the specified text.
-
-    :param s: a pyscsi.SCSI instance
-    :param text: string expected as part of the CHECK CONDITION
-    :param check_type: CheckType enum of the expected CHECK_CONDITION
-
-    Issue a TEST UNIT READY and verify that the expected CHECK CONDITION is raised.
-
-    If this version of pyscsi(/cython-iscsi) does not support CHECK CONDITION
-    then just swallow the condition by issuing another TEST UNIT READY.
-    """
-    assert check_type in CheckType, f"Parameter '{check_type}' is not a CheckType"
-    if pyscsi_supports_check_condition:
-        with pytest.raises(Exception) as excinfo:
-            s.testunitready()
-
-        e = excinfo.value
-        assert e.__class__.__name__ == str(check_type), f"Unexpected CHECK CONDITION type.  Got '{e.__class__.__name__}', expected {str(check_type)}"
-        if text:
-            assert text in str(e), f"Exception did not match: {text}"
-    else:
-        # If we cannot detect a CHECK CONDITION, then swallow it by retrying a TUR
-        try:
-            s.testunitready()
-        except TypeError:
-            s.testunitready()
-
-
-def _verify_inquiry(s):
-    """
-    Verify that the supplied SCSI has the expected INQUIRY response.
-
-    :param s: a pyscsi.SCSI instance
-    """
-    TUR(s)
-    r = s.inquiry()
-    data = r.result
-    assert data['t10_vendor_identification'].decode('utf-8').startswith("TrueNAS"), str(data)
-    assert data['product_identification'].decode('utf-8').startswith("iSCSI Disk"), str(data)
-
-
-def get_target(targetid):
-    """
-    Return target JSON data.
-    """
-    return call('iscsi.target.get_instance', int(targetid))
-
-
-def get_targets():
-    """
-    Return a dictionary of target JSON data, keyed by target name.
-    """
-    return {target['name']: target for target in call('iscsi.target.query')}
-
-
-def modify_target(targetid, payload):
-    call('iscsi.target.update', targetid, payload)
-
-
-def set_target_alias(targetid, newalias):
-    modify_target(targetid, {'alias': newalias})
-
-
-def set_target_initiator_id(targetid, initiatorid):
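-    # The target is expected to have exactly one group; point that group's
-    # initiator at the supplied initiator id.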
-    target_data = get_target(targetid)
-
-    assert 'groups' in target_data, target_data
-    groups = target_data['groups']
-    assert len(groups) == 1, target_data
-
-    groups[0]['initiator'] = initiatorid
-    modify_target(targetid, {'groups': groups})
-
-
-def _get_service(service_name='iscsitarget'):
-    return call('service.query', [['service', '=', service_name]], {'get': True})
-
-
-@pytest.mark.dependency(name="iscsi_cmd_00")
-def test_00_setup(request):
-    # Enable iSCSI service
-    payload = {"enable": True}
-    call('service.update', 'iscsitarget', payload)
-    # Start iSCSI service
-    call('service.start', 'iscsitarget')
-    sleep(1)
-    # Verify running
-    service = _get_service()
-    assert service['state'] == "RUNNING", service
-
-
-def test_01_inquiry(request):
-    """
-    This tests that the Vendor and Product information in an INQUIRY response
-    are 'TrueNAS' and 'iSCSI Disk' respectively.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator():
-        with portal() as portal_config:
-            portal_id = portal_config['id']
-            with target(target_name, [{'portal': portal_id}]) as target_config:
-                target_id = target_config['id']
-                with dataset(dataset_name):
-                    with file_extent(pool_name, dataset_name, file_name) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            iqn = f'{basename}:{target_name}'
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                _verify_inquiry(s)
-
-
-def test_02_read_capacity16(request):
-    """
-    This tests that the target created returns the correct size to READ CAPACITY (16).
-
-    It performs this test with a couple of sizes for both file & zvol based targets.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator():
-        with portal() as portal_config:
-            portal_id = portal_config['id']
-            with target(target_name, [{'portal': portal_id}]) as target_config:
-                target_id = target_config['id']
-                with dataset(dataset_name):
-                    # 100 MB file extent
-                    with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            iqn = f'{basename}:{target_name}'
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_capacity(s, MB_100)
-                    # 512 MB file extent
-                    with file_extent(pool_name, dataset_name, file_name, MB_512) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            iqn = f'{basename}:{target_name}'
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_capacity(s, MB_512)
-                # 100 MB zvol extent
-                with zvol_dataset(zvol, MB_100):
-                    with zvol_extent(zvol) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            iqn = f'{basename}:{target_name}'
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_capacity(s, MB_100)
-                # 512 MB zvol extent
-                with zvol_dataset(zvol):
-                    with zvol_extent(zvol) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            iqn = f'{basename}:{target_name}'
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_capacity(s, MB_512)
-
-
-def target_test_readwrite16(ip, iqn):
-    """
-    This tests WRITE SAME (16), READ (16) and WRITE (16)
-    operations on the specified target.
-    """
-    zeros = bytearray(512)
-    deadbeef = bytearray.fromhex('deadbeef') * 128
-    deadbeef_lbas = [1, 5, 7]
-
-    with iscsi_scsi_connection(ip, iqn) as s:
-        TUR(s)
-
-        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
-        s.writesame16(0, 12, zeros)
-
-        # Check results using READ (16)
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            assert r.datain == zeros, r.datain
-
-        # Now let's write DEADBEEF to a few LBAs using WRITE (16)
-        for lba in deadbeef_lbas:
-            s.write16(lba, 1, deadbeef)
-
-        # Check results using READ (16)
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            if lba in deadbeef_lbas:
-                assert r.datain == deadbeef, r.datain
-            else:
-                assert r.datain == zeros, r.datain
-
-    # Drop the iSCSI connection and login again
-    with iscsi_scsi_connection(ip, iqn) as s:
-        TUR(s)
-
-        # Check results using READ (16)
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            if lba in deadbeef_lbas:
-                assert r.datain == deadbeef, r.datain
-            else:
-                assert r.datain == zeros, r.datain
-
-        # Do a WRITE for > 1 LBA
-        s.write16(10, 2, deadbeef * 2)
-
-        # Check results using READ (16)
-        deadbeef_lbas.extend([10, 11])
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            if lba in deadbeef_lbas:
-                assert r.datain == deadbeef, r.datain
-            else:
-                assert r.datain == zeros, r.datain
-
-        # Do a couple of READ (16) for > 1 LBA
-        # At this stage we have written deadbeef to LBAs 1,5,7,10,11
-        r = s.read16(0, 2)
-        assert r.datain == zeros + deadbeef, r.datain
-        r = s.read16(1, 2)
-        assert r.datain == deadbeef + zeros, r.datain
-        r = s.read16(2, 2)
-        assert r.datain == zeros * 2, r.datain
-        r = s.read16(10, 2)
-        assert r.datain == deadbeef * 2, r.datain
-
-
-def test_03_readwrite16_file_extent(request):
-    """
-    This tests WRITE SAME (16), READ (16) and WRITE (16) operations with
-    a file extent based iSCSI target.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name):
-            iqn = f'{basename}:{target_name}'
-            target_test_readwrite16(truenas_server.ip, iqn)
-
-
-def test_04_readwrite16_zvol_extent(request):
-    """
-    This tests WRITE SAME (16), READ (16) and WRITE (16) operations with
-    a zvol extent based iSCSI target.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator_portal() as config:
-        with configured_target_to_zvol_extent(config, target_name, zvol):
-            iqn = f'{basename}:{target_name}'
-            target_test_readwrite16(truenas_server.ip, iqn)
-
-
-@skip_invalid_initiatorname
-def test_05_chap(request):
-    """
-    This tests that CHAP auth operates as expected.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    user = "user1"
-    secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
-    with initiator():
-        with portal() as portal_config:
-            portal_id = portal_config['id']
-            auth_tag = 1
-            with iscsi_auth(auth_tag, user, secret):
-                with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP', 'auth': auth_tag}]) as target_config:
-                    target_id = target_config['id']
-                    with dataset(dataset_name):
-                        with file_extent(pool_name, dataset_name, file_name) as extent_config:
-                            extent_id = extent_config['id']
-                            with target_extent_associate(target_id, extent_id):
-                                iqn = f'{basename}:{target_name}'
-
-                                # Try and fail to connect without supplying CHAP creds
-                                with pytest.raises(RuntimeError) as ve:
-                                    with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                        TUR(s)
-                                        assert False, "Should not have been able to connect without CHAP credentials."
-                                assert 'Unable to connect to' in str(ve), ve
-
-                                # Try and fail to connect supplying incorrect CHAP creds
-                                with pytest.raises(RuntimeError) as ve:
-                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s:
-                                        TUR(s)
-                                        assert False, "Should not have been able to connect with incorrect CHAP credentials."
-                                assert 'Unable to connect to' in str(ve), ve
-
-                                # Finally ensure we can connect with the right CHAP creds
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s:
-                                    _verify_inquiry(s)
-
-
-@skip_invalid_initiatorname
-def test_06_mutual_chap(request):
-    """
-    This tests that Mutual CHAP auth operates as expected.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    user = "user1"
-    secret = 'sec1' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
-    peer_user = "user2"
-    peer_secret = 'sec2' + ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
-    with initiator():
-        with portal() as portal_config:
-            portal_id = portal_config['id']
-            auth_tag = 1
-            with iscsi_auth(auth_tag, user, secret, peer_user, peer_secret):
-                with target(target_name, [{'portal': portal_id, 'authmethod': 'CHAP_MUTUAL', 'auth': auth_tag}]) as target_config:
-                    target_id = target_config['id']
-                    with dataset(dataset_name):
-                        with file_extent(pool_name, dataset_name, file_name) as extent_config:
-                            extent_id = extent_config['id']
-                            with target_extent_associate(target_id, extent_id):
-                                iqn = f'{basename}:{target_name}'
-
-                                # Try and fail to connect without supplying Mutual CHAP creds
-                                with pytest.raises(RuntimeError) as ve:
-                                    with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                        TUR(s)
-                                        assert False, "Should not have been able to connect without CHAP credentials."
-                                assert 'Unable to connect to' in str(ve), ve
-
-                                # Try and fail to connect supplying incorrect CHAP creds (not mutual)
-                                with pytest.raises(RuntimeError) as ve:
-                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, "WrongSecret") as s:
-                                        TUR(s)
-                                        assert False, "Should not have been able to connect with incorrect CHAP credentials."
-                                assert 'Unable to connect to' in str(ve), ve
-
-                                # Ensure we can connect with the right CHAP creds, if we *choose* not
-                                # to validate things.
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret) as s:
-                                    _verify_inquiry(s)
-
-                                # Try and fail to connect supplying incorrect Mutual CHAP creds
-                                with pytest.raises(RuntimeError) as ve:
-                                    with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, "WrongSecret") as s:
-                                        TUR(s)
-                                        assert False, "Should not have been able to connect with incorrect Mutual CHAP credentials."
-                                assert 'Unable to connect to' in str(ve), ve
-
-                                # Finally ensure we can connect with the right Mutual CHAP creds
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0, user, secret, peer_user, peer_secret) as s:
-                                    _verify_inquiry(s)
-
-
-def _assert_auth(auth, tag, user, secret, peeruser, peersecret, discovery_auth):
-    assert auth['tag'] == tag
-    assert auth['user'] == user
-    assert auth['secret'] == secret
-    assert auth['peeruser'] == peeruser
-    if peeruser:
-        assert auth['peersecret'] == peersecret
-    assert auth['discovery_auth'] == discovery_auth
-
-
-def test_06_discovery_auth():
-    """
-    Test Discovery Auth
-    """
-    randsec = ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=10))
-    assert [] == call('iscsi.auth.query')
-
-    # Create a regular auth (without discovery auth) and then try to modify it.
-    with iscsi_auth(1, 'user1', 'sec1' + randsec) as config1:
-        _assert_auth(config1, 1, 'user1', 'sec1' + randsec, '', None, 'NONE')
-
-        # Change discovery_auth to CHAP
-        config2 = call('iscsi.auth.update', config1['id'], {'discovery_auth': 'CHAP'})
-        _assert_auth(config2, 1, 'user1', 'sec1' + randsec, '', None, 'CHAP')
-
-        # Try to change discovery_auth to CHAP_MUTUAL (will fail, no peeruser)
-        with pytest.raises(ValidationErrors) as ve:
-            call('iscsi.auth.update', config1['id'], {'discovery_auth': 'CHAP_MUTUAL'})
-        assert ve.value.errors == [
-            ValidationError(
-                'iscsi_auth_update.discovery_auth',
-                'Cannot specify CHAP_MUTUAL if peer_user has not been defined.'
-            )]
-
-        # Change discovery_auth to CHAP_MUTUAL (incl add peeruser)
-        call('iscsi.auth.update', config1['id'], {'peeruser': 'user2',
-                                                  'peersecret': 'sec2' + randsec,
-                                                  'discovery_auth': 'CHAP_MUTUAL'})
-        config3 = call('iscsi.auth.query', [['id', '=', config1['id']]], {'get': True})
-        _assert_auth(config3, 1, 'user1', 'sec1' + randsec, 'user2', 'sec2' + randsec, 'CHAP_MUTUAL')
-
-        # Try to create 2nd discovery_auth with CHAP_MUTUAL (will fail, too many CHAP_MUTUAL)
-        second_auth = {
-            'tag': 2,
-            'user': 'user3',
-            'secret': 'sec3' + randsec,
-            'peeruser': 'user4',
-            'peersecret': 'sec4' + randsec,
-            'discovery_auth': 'CHAP_MUTUAL',
-        }
-        with pytest.raises(ValidationErrors) as ve:
-            call('iscsi.auth.create', second_auth | {'discovery_auth': 'CHAP_MUTUAL'})
-        assert ve.value.errors == [
-            ValidationError(
-                'iscsi_auth_create.discovery_auth',
-                'Cannot specify CHAP_MUTUAL as only one such entry is permitted.'
-            )]
-
-        # Create 2nd discovery_auth with CHAP
-        with iscsi_auth(2, 'user3', 'sec3' + randsec, 'user4', 'sec4' + randsec, 'CHAP') as config4:
-            _assert_auth(config4, 2, 'user3', 'sec3' + randsec, 'user4', 'sec4' + randsec, 'CHAP')
-
-            # Try to change 2nd discovery_auth to CHAP_MUTUAL (will fail, too many CHAP_MUTUAL)
-            with pytest.raises(ValidationErrors) as ve:
-                call('iscsi.auth.update', config4['id'], {'discovery_auth': 'CHAP_MUTUAL'})
-            assert ve.value.errors == [
-                ValidationError(
-                    'iscsi_auth_update.discovery_auth',
-                    'Cannot specify CHAP_MUTUAL as only one such entry is permitted.'
-                )]
-            _assert_auth(config4, 2, 'user3', 'sec3' + randsec, 'user4', 'sec4' + randsec, 'CHAP')
-
-            # Change 1st discovery_auth to NONE
-            config5 = call('iscsi.auth.update', config1['id'], {'discovery_auth': 'NONE'})
-            _assert_auth(config5, 1, 'user1', 'sec1' + randsec, 'user2', 'sec2' + randsec, 'NONE')
-
-            # Change 2nd discovery_auth to CHAP_MUTUAL
-            config6 = call('iscsi.auth.update', config4['id'], {'discovery_auth': 'CHAP_MUTUAL'})
-            _assert_auth(config6, 2, 'user3', 'sec3' + randsec, 'user4', 'sec4' + randsec, 'CHAP_MUTUAL')
-
-    assert [] == call('iscsi.auth.query')
-
-
-def test_07_report_luns(request):
-    """
-    This tests REPORT LUNS and accessing multiple LUNs on a target.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    iqn = f'{basename}:{target_name}'
-    with initiator():
-        with portal() as portal_config:
-            portal_id = portal_config['id']
-            with target(target_name, [{'portal': portal_id}]) as target_config:
-                target_id = target_config['id']
-                with dataset(dataset_name):
-                    # LUN 0 (100 MB file extent)
-                    with file_extent(pool_name, dataset_name, file_name, MB_100) as extent_config:
-                        extent_id = extent_config['id']
-                        with target_extent_associate(target_id, extent_id):
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_luns(s, [0])
-                                verify_capacity(s, MB_100)
-                            # Now create a 512 MB zvol and associate with LUN 1
-                            with zvol_dataset(zvol):
-                                with zvol_extent(zvol) as extent_config:
-                                    extent_id = extent_config['id']
-                                    with target_extent_associate(target_id, extent_id, 1):
-                                        # Connect to LUN 0
-                                        with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s0:
-                                            verify_luns(s0, [0, 1])
-                                            verify_capacity(s0, MB_100)
-                                        # Connect to LUN 1
-                                        with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s1:
-                                            verify_luns(s1, [0, 1])
-                                            verify_capacity(s1, MB_512)
-                            # Check again now that LUN 1 has been removed.
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                verify_luns(s, [0])
-                                verify_capacity(s, MB_100)
-
-
-def target_test_snapshot_single_login(ip, iqn, dataset_id):
-    """
-    This tests snapshots with an iSCSI target using a single
-    iSCSI session.
-    """
-    zeros = bytearray(512)
-    deadbeef = bytearray.fromhex('deadbeef') * 128
-    deadbeef_lbas = [1, 5, 7]
-    all_deadbeef_lbas = [1, 5, 7, 10, 11]
-
-    with iscsi_scsi_connection(ip, iqn) as s:
-        TUR(s)
-
-        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
-        s.writesame16(0, 12, zeros)
-
-        # Check results using READ (16)
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            assert r.datain == zeros, r.datain
-
-        # Take snap0
-        with snapshot(dataset_id, "snap0", get=True) as snap0_config:
-
-            # Now let's write DEADBEEF to a few LBAs using WRITE (16)
-            for lba in deadbeef_lbas:
-                s.write16(lba, 1, deadbeef)
-
-            # Check results using READ (16)
-            for lba in range(0, 12):
-                r = s.read16(lba, 1)
-                if lba in deadbeef_lbas:
-                    assert r.datain == deadbeef, r.datain
-                else:
-                    assert r.datain == zeros, r.datain
-
-            # Take snap1
-            with snapshot(dataset_id, "snap1", get=True) as snap1_config:
-
-                # Do a WRITE for > 1 LBA
-                s.write16(10, 2, deadbeef * 2)
-
-                # Check results using READ (16)
-                for lba in range(0, 12):
-                    r = s.read16(lba, 1)
-                    if lba in all_deadbeef_lbas:
-                        assert r.datain == deadbeef, r.datain
-                    else:
-                        assert r.datain == zeros, r.datain
-
-                # Now revert to snap1
-                snapshot_rollback(snap1_config['id'])
-
-                # Check results using READ (16)
-                for lba in range(0, 12):
-                    r = s.read16(lba, 1)
-                    if lba in deadbeef_lbas:
-                        assert r.datain == deadbeef, r.datain
-                    else:
-                        assert r.datain == zeros, r.datain
-
-            # Now revert to snap0
-            snapshot_rollback(snap0_config['id'])
-
-            # Check results using READ (16)
-            for lba in range(0, 12):
-                r = s.read16(lba, 1)
-                assert r.datain == zeros, r.datain
-
-
-def target_test_snapshot_multiple_login(ip, iqn, dataset_id):
-    """
-    This tests snapshots with an iSCSI target using multiple
-    iSCSI sessions.
-    """
-    zeros = bytearray(512)
-    deadbeef = bytearray.fromhex('deadbeef') * 128
-    deadbeef_lbas = [1, 5, 7]
-    all_deadbeef_lbas = [1, 5, 7, 10, 11]
-
-    with iscsi_scsi_connection(ip, iqn) as s:
-        TUR(s)
-
-        # First let's write zeros to the first 12 blocks using WRITE SAME (16)
-        s.writesame16(0, 12, zeros)
-
-        # Check results using READ (16)
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            assert r.datain == zeros, r.datain
-
-    # Take snap0
-    with snapshot(dataset_id, "snap0", get=True) as snap0_config:
-
-        with iscsi_scsi_connection(ip, iqn) as s:
-            TUR(s)
-
-            # Now let's write DEADBEEF to a few LBAs using WRITE (16)
-            for lba in deadbeef_lbas:
-                s.write16(lba, 1, deadbeef)
-
-            # Check results using READ (16)
-            for lba in range(0, 12):
-                r = s.read16(lba, 1)
-                if lba in deadbeef_lbas:
-                    assert r.datain == deadbeef, r.datain
-                else:
-                    assert r.datain == zeros, r.datain
-
-        # Take snap1
-        with snapshot(dataset_id, "snap1", get=True) as snap1_config:
-
-            with iscsi_scsi_connection(ip, iqn) as s:
-                TUR(s)
-
-                # Do a WRITE for > 1 LBA
-                s.write16(10, 2, deadbeef * 2)
-
-                # Check results using READ (16)
-                for lba in range(0, 12):
-                    r = s.read16(lba, 1)
-                    if lba in all_deadbeef_lbas:
-                        assert r.datain == deadbeef, r.datain
-                    else:
-                        assert r.datain == zeros, r.datain
-
-                # Now revert to snap1
-                snapshot_rollback(snap1_config['id'])
-
-        with iscsi_scsi_connection(ip, iqn) as s:
-            TUR(s)
-
-            # Check results using READ (16)
-            for lba in range(0, 12):
-                r = s.read16(lba, 1)
-                if lba in deadbeef_lbas:
-                    assert r.datain == deadbeef, r.datain
-                else:
-                    assert r.datain == zeros, r.datain
-
-        # Now revert to snap0
-        snapshot_rollback(snap0_config['id'])
-
-        with iscsi_scsi_connection(ip, iqn) as s:
-            TUR(s)
-            # Check results using READ (16)
-            for lba in range(0, 12):
-                r = s.read16(lba, 1)
-                assert r.datain == zeros, r.datain
-
-
-def test_08_snapshot_zvol_extent(request):
-    """
-    This tests snapshots with a zvol extent based iSCSI target.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    iqn = f'{basename}:{target_name}'
-    with initiator_portal() as config:
-        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
-            target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset'])
-        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
-            target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset'])
-
-
-def test_09_snapshot_file_extent(request):
-    """
-    This tests snapshots with a file extent based iSCSI target.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    iqn = f'{basename}:{target_name}'
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
-            target_test_snapshot_single_login(truenas_server.ip, iqn, iscsi_config['dataset'])
-        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
-            target_test_snapshot_multiple_login(truenas_server.ip, iqn, iscsi_config['dataset'])
-
-
-def test_10_target_alias(request):
-    """
-    This tests iSCSI target alias.
-
-    At the moment SCST does not use the alias usefully (e.g. TargetAlias in
-    LOGIN response).  When this is rectified this test should be extended.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    data = {}
-    for t in ["A", "B"]:
-        data[t] = {}
-        data[t]['name'] = f"{target_name}{t.lower()}"
-        data[t]['alias'] = f"{target_name}{t}_alias"
-        data[t]['file'] = f"{target_name}{t}_file"
-
-    A = data['A']
-    B = data['B']
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config, A['name'], pool_name, dataset_name, A['file'], A['alias']) as iscsi_config:
-            with target(B['name'], [{'portal': iscsi_config['portal']['id']}]) as targetB_config:
-                with file_extent(pool_name, dataset_name, B['file'], extent_name="extentB") as extentB_config:
-                    with target_extent_associate(targetB_config['id'], extentB_config['id']):
-                        # Created two targets, one with an alias, one without.  Check them.
-                        targets = get_targets()
-                        assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias']
-                        assert targets[B['name']]['alias'] is None, targets[B['name']]['alias']
-
-                        # Update alias for B
-                        set_target_alias(targets[B['name']]['id'], B['alias'])
-                        targets = get_targets()
-                        assert targets[A['name']]['alias'] == A['alias'], targets[A['name']]['alias']
-                        assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias']
-
-                        # Clear alias for A
-                        set_target_alias(targets[A['name']]['id'], "")
-                        targets = get_targets()
-                        assert targets[A['name']]['alias'] is None, targets[A['name']]['alias']
-                        assert targets[B['name']]['alias'] == B['alias'], targets[B['name']]['alias']
-
-                        # Clear alias for B
-                        set_target_alias(targets[B['name']]['id'], "")
-                        targets = get_targets()
-                        assert targets[A['name']]['alias'] is None, targets[A['name']]['alias']
-                        assert targets[B['name']]['alias'] is None, targets[B['name']]['alias']
-
-
-def test_11_modify_portal(request):
-    """
-    Test that we can modify a target portal.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with portal() as portal_config:
-        assert portal_config['comment'] == 'Default portal', portal_config
-        # First just change the comment
-        payload = {'comment': 'New comment'}
-        call('iscsi.portal.update', portal_config['id'], payload)
-        new_config = call('iscsi.portal.get_instance', portal_config['id'])
-        assert new_config['comment'] == 'New comment', new_config
-        # Then try to reapply everything
-        payload = {'comment': 'test1', 'discovery_authmethod': 'NONE', 'discovery_authgroup': None, 'listen': [{'ip': '0.0.0.0'}]}
-        call('iscsi.portal.update', portal_config['id'], payload)
-        new_config = call('iscsi.portal.get_instance', portal_config['id'])
-        assert new_config['comment'] == 'test1', new_config
-
-
-def test_12_pblocksize_setting(request):
-    """
-    This tests whether toggling pblocksize has the desired result on READ CAPACITY 16, i.e.
-    whether setting it results in LOGICAL BLOCKS PER PHYSICAL BLOCK EXPONENT being zero.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    iqn = f'{basename}:{target_name}'
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name) as iscsi_config:
-            extent_config = iscsi_config['extent']
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                TUR(s)
-                data = s.readcapacity16().result
-                # By default 512 << 3 == 4096
-                assert data['lbppbe'] == 3, data
-
-                # First let's just change the blocksize to 2K
-                payload = {'blocksize': 2048}
-                call('iscsi.extent.update', extent_config['id'], payload)
-
-                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-
-                data = s.readcapacity16().result
-                assert data['block_length'] == 2048, data
-                assert data['lbppbe'] == 1, data
-
-                # Now let's change it back to 512, but also set pblocksize
-                payload = {'blocksize': 512, 'pblocksize': True}
-                call('iscsi.extent.update', extent_config['id'], payload)
-
-                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-
-                data = s.readcapacity16().result
-                assert data['block_length'] == 512, data
-                assert data['lbppbe'] == 0, data
-
-        with configured_target_to_zvol_extent(config, target_name, zvol) as iscsi_config:
-            extent_config = iscsi_config['extent']
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                TUR(s)
-                data = s.readcapacity16().result
-                # We created a vol with volblocksize == 16K (512 << 5)
-                assert data['lbppbe'] == 5, data
-
-                # First let's just change the blocksize to 4K
-                payload = {'blocksize': 4096}
-                call('iscsi.extent.update', extent_config['id'], payload)
-
-                expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-
-                data = s.readcapacity16().result
-                assert data['block_length'] == 4096, data
-                assert data['lbppbe'] == 2, data
-
-                # Now let's also set pblocksize
-                payload = {'pblocksize': True}
-                call('iscsi.extent.update', extent_config['id'], payload)
-
-                TUR(s)
-                data = s.readcapacity16().result
-                assert data['block_length'] == 4096, data
-                assert data['lbppbe'] == 0, data
-
-
-def generate_name(length, base="target"):
-    result = f"{base}-{length}-"
-    remaining = length - len(result)
-    assert remaining >= 0, f"Function not suitable for such a short length: {length}"
-    return result + ''.join(random.choices(string.ascii_lowercase + string.digits, k=remaining))
-
-
-@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
-def test_13_test_target_name(request, extent_type):
-    """
-    Test the user-supplied target name.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    with initiator_portal() as config:
-        name63 = generate_name(63)
-        name64 = generate_name(64)
-        name65 = generate_name(65)
-        with configured_target(config, name64, extent_type) as config:
-            iqn = f'{basename}:{name64}'
-            target_test_readwrite16(truenas_server.ip, iqn)
-            # Now try to change the length of the extent name
-            call('iscsi.extent.update', config['extent']['id'], {'name': name63})
-            with pytest.raises(ValidationErrors) as ve:
-                call('iscsi.extent.update', config['extent']['id'], {'name': name65})
-            assert ve.value.errors == [
-                ValidationError('iscsi_extent_update.name', 'String should have at most 64 characters', errno.EINVAL),
-            ]
-
-        with pytest.raises(ValidationErrors) as ve:
-            with configured_target(config, name65, extent_type):
-                assert False, f"Should not have been able to create a target with name length {len(name65)}."
-        assert ve.value.errors == [
-            ValidationError('iscsi_extent_create.name', 'String should have at most 64 characters', errno.EINVAL),
-        ]
-
-
-@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
-def test_14_target_lun_extent_modify(request, extent_type):
-    """
-    Perform some tests of the iscsi.targetextent.update API, including
-    trying to provide invalid combinations.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    name1 = f'{target_name}1'
-    name2 = f'{target_name}2'
-    name3 = f'{target_name}3'
-    name4 = f'{target_name}4'
-
-    @contextlib.contextmanager
-    def expect_lun_in_use_failure():
-        with pytest.raises(ValidationErrors) as ve:
-            yield
-            assert False, "Should not be able to associate because LUN in use"
-        assert "LUN ID is already being used for this target." in str(ve.value)
-
-    @contextlib.contextmanager
-    def expect_extent_in_use_failure():
-        with pytest.raises(ValidationErrors) as ve:
-            yield
-            assert False, "Should not be able to associate because extent in use"
-        assert "Extent is already in use" in str(ve.value)
-
-    # The following will create the extents with the same name as the target.
-    with initiator_portal() as config:
-        with configured_target(config, name1, extent_type) as config1:
-            with configured_target(config, name2, extent_type) as config2:
-                with configured_target(config, name3, extent_type) as config3:
-                    # Create an extra extent to 'play' with
-                    with zvol_dataset(zvol):
-                        with zvol_extent(zvol, extent_name=name4) as config4:
-                            # First we will attempt some new, but invalid associations
-
-                            # LUN in use
-                            with expect_lun_in_use_failure():
-                                payload = {
-                                    'target': config1['target']['id'],
-                                    'lunid': 0,
-                                    'extent': config4['id']
-                                }
-                                call('iscsi.targetextent.create', payload)
-
-                            # extent in use
-                            with expect_extent_in_use_failure():
-                                payload = {
-                                    'target': config1['target']['id'],
-                                    'lunid': 1,
-                                    'extent': config2['extent']['id']
-                                }
-                                call('iscsi.targetextent.create', payload)
-
-                            # Now succeed in creating a new target/lun/extent association
-                            payload = {
-                                'target': config1['target']['id'],
-                                'lunid': 1,
-                                'extent': config4['id']
-                            }
-                            call('iscsi.targetextent.create', payload)
-
-                            # Get the current config
-                            textents = call('iscsi.targetextent.query')
-
-                            # Now perform some updates that will not succeed
-                            textent4 = next(textent for textent in textents if textent['extent'] == config4['id'])
-
-                            # Attempt some invalid updates
-                            # LUN in use
-                            with expect_lun_in_use_failure():
-                                payload = {
-                                    'target': textent4['target'],
-                                    'lunid': 0,
-                                    'extent': textent4['extent']
-                                }
-                                call('iscsi.targetextent.update', textent4['id'], payload)
-
-                            # extent in use in another target
-                            with expect_extent_in_use_failure():
-                                payload = {
-                                    'target': textent4['target'],
-                                    'lunid': textent4['lunid'],
-                                    'extent': config3['extent']['id']
-                                }
-                                call('iscsi.targetextent.update', textent4['id'], payload)
-
-                            # extent in use in this target
-                            with expect_extent_in_use_failure():
-                                payload = {
-                                    'target': textent4['target'],
-                                    'lunid': textent4['lunid'],
-                                    'extent': config1['extent']['id']
-                                }
-                                call('iscsi.targetextent.update', textent4['id'], payload)
-
-                            # Move a target to LUN 1
-                            textent2 = next(textent for textent in textents if textent['extent'] == config2['extent']['id'])
-                            payload = {
-                                'target': textent2['target'],
-                                'lunid': 1,
-                                'extent': textent2['extent']
-                            }
-                            call('iscsi.targetextent.update', textent2['id'], payload)
-
-                            # Try to move it (to target1) just by changing the target, will clash
-                            with expect_lun_in_use_failure():
-                                payload = {
-                                    'target': config1['target']['id'],
-                                    'lunid': 1,
-                                    'extent': textent2['extent']
-                                }
-                                call('iscsi.targetextent.update', textent2['id'], payload)
-
-                            # But can move it elsewhere (target3)
-                            payload = {
-                                'target': config3['target']['id'],
-                                'lunid': 1,
-                                'extent': textent2['extent']
-                            }
-                            call('iscsi.targetextent.update', textent2['id'], payload)
-
-                            # Delete textent4 association
-                            call('iscsi.targetextent.delete', textent4['id'])
-
-                            # Now can do the move that previously failed
-                            payload = {
-                                'target': config1['target']['id'],
-                                'lunid': 1,
-                                'extent': textent2['extent']
-                            }
-                            call('iscsi.targetextent.update', textent2['id'], payload)
-
-                            # Restore it
-                            payload = {
-                                'target': config2['target']['id'],
-                                'lunid': 0,
-                                'extent': textent2['extent']
-                            }
-                            call('iscsi.targetextent.update', textent2['id'], payload)
-
-
-def _isns_wait_for_iqn(isns_client, iqn, timeout=10):
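-    """Poll the iSNS server until the given IQN appears in its target list or the
-    timeout (in seconds) expires, returning the last set of IQNs seen."""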
-    iqns = set(isns_client.list_targets())
-    while timeout > 0 and iqn not in iqns:
-        sleep(1)
-        timeout -= 1
-        iqns = set(isns_client.list_targets())
-    return iqns
-
-
-def test_15_test_isns(request):
-    """
-    Test ability to register targets with iSNS.
-    """
-    # Use a more distinctive target name than usual, in case several test runs
-    # are hitting the same iSNS server at the same time.
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    _host = socket.gethostname()
-    _rand = ''.join(random.choices(string.digits + string.ascii_lowercase, k=12))
-    _name_base = f'isnstest:{_host}:{_rand}'
-    _target1 = f'{_name_base}:1'
-    _target2 = f'{_name_base}:2'
-    _initiator = f'iqn.2005-10.org.freenas.ctl:isnstest:{_name_base}:initiator'
-    _iqn1 = f'{basename}:{_target1}'
-    _iqn2 = f'{basename}:{_target2}'
-
-    with isns_connection(isns_ip, _initiator) as isns_client:
-        # First let's ensure that the targets are not already present.
-        base_iqns = set(isns_client.list_targets())
-        for iqn in [_iqn1, _iqn2]:
-            assert iqn not in base_iqns, iqn
-
-        # Create target1 and ensure it is still not present (because we
-        # haven't switched on iSNS yet).
-        with initiator_portal() as config:
-            with configured_target_to_file_extent(config,
-                                                  _target1,
-                                                  pool_name,
-                                                  dataset_name,
-                                                  file_name) as iscsi_config:
-                iqns = set(isns_client.list_targets())
-                assert _iqn1 not in iqns, _iqn1
-
-                # Now turn on the iSNS server
-                with isns_enabled():
-                    iqns = _isns_wait_for_iqn(isns_client, _iqn1)
-                    assert _iqn1 in iqns, _iqn1
-
-                    # Create another target and ensure it shows up too
-                    with target(_target2,
-                                [{'portal': iscsi_config['portal']['id']}]
-                                ) as target2_config:
-                        target_id = target2_config['id']
-                        with zvol_dataset(zvol):
-                            with zvol_extent(zvol) as extent_config:
-                                extent_id = extent_config['id']
-                                with target_extent_associate(target_id, extent_id):
-                                    iqns = _isns_wait_for_iqn(isns_client, _iqn2)
-                                    for iqn in [_iqn1, _iqn2]:
-                                        assert iqn in iqns, iqn
-
-                # Now that iSNS is disabled again, ensure that our target is
-                # no longer advertised
-                iqns = set(isns_client.list_targets())
-                assert _iqn1 not in iqns, _iqn1
-
-        # Finally let's ensure that neither target is present.
-        base_iqns = set(isns_client.list_targets())
-        for iqn in [_iqn1, _iqn2]:
-            assert iqn not in base_iqns, iqn
-
-
-class TestFixtureInitiatorName:
-    """Fixture for test_16_invalid_initiator_name"""
-
-    iqn = f'{basename}:{target_name}'
-
-    @pytest.fixture(scope='class')
-    def create_target(self):
-        with initiator_portal() as config:
-            with configured_target(config, target_name, "FILE"):
-                yield
-
-    params = [
-        (None, True),
-        ("iqn.1991-05.com.microsoft:fake-host", True),
-        ("iqn.1991-05.com.microsoft:fake-/-host", False),
-        ("iqn.1991-05.com.microsoft:fake-#-host", False),
-        ("iqn.1991-05.com.microsoft:fake-%s-host", False),
-        ("iqn.1991-05.com.microsoft:unicode-\u6d4b\u8bd5-ok", True),        # 测试
-        ("iqn.1991-05.com.microsoft:unicode-\u30c6\u30b9\u30c8-ok", True),  # テスト
-        ("iqn.1991-05.com.microsoft:unicode-\u180E-bad", False),            # Mongolian vowel separator
-        ("iqn.1991-05.com.microsoft:unicode-\u2009-bad", False),            # Thin Space
-        ("iqn.1991-05.com.microsoft:unicode-\uFEFF-bad", False),            # Zero width no-break space
-    ]
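-    # The 'bad' names contain punctuation ('/', '#', '%') or Unicode space/format
-    # code points that are not expected in a valid initiator name; the 'good' ones
-    # use ordinary Unicode letters and should be accepted.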
-
-    @pytest.mark.parametrize("initiator_name, expected", params)
-    def test_16_invalid_initiator_name(self, request, create_target, initiator_name, expected):
-        """
-        Deliberately send SCST some invalid initiator names and ensure it behaves OK.
-        """
-        depends(request, ["iscsi_cmd_00"], scope="session")
-
-        if expected:
-            with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s:
-                _verify_inquiry(s)
-        else:
-            with pytest.raises(RuntimeError) as ve:
-                with iscsi_scsi_connection(truenas_server.ip, TestFixtureInitiatorName.iqn, initiator_name=initiator_name) as s:
-                    assert False, "Should not have been able to connect with invalid initiator name."
-            assert 'Unable to connect to' in str(ve), ve
-
-
-def _pr_check_registered_keys(s, expected=[]):
-    opcodes = s.device.opcodes
-    data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_KEYS)
-    assert len(data.result['reservation_keys']) == len(expected), data.result
-    if len(expected):
-        expected_set = set(expected)
-        received_set = set(data.result['reservation_keys'])
-        assert expected_set == received_set, received_set
-    return data.result
-
-
-def _pr_check_reservation(s, expected={'reservation_key': None, 'scope': None, 'type': None}):
-    opcodes = s.device.opcodes
-    data = s.persistentreservein(opcodes.PERSISTENT_RESERVE_IN.serviceaction.READ_RESERVATION)
-    for key, value in expected.items():
-        actual_value = data.result.get(key)
-        assert value == actual_value, data.result
-    return data.result
-
-
-def _pr_register_key(s, value):
-    opcodes = s.device.opcodes
-    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER,
-                           service_action_reservation_key=value)
-
-
-def _pr_unregister_key(s, value):
-    opcodes = s.device.opcodes
-    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.REGISTER,
-                           reservation_key=value,
-                           service_action_reservation_key=0)
-
-
-def _pr_reserve(s, pr_type, scope=LU_SCOPE, **kwargs):
-    opcodes = s.device.opcodes
-    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RESERVE,
-                           scope=scope,
-                           pr_type=pr_type,
-                           **kwargs)
-
-
-def _pr_release(s, pr_type, scope=LU_SCOPE, **kwargs):
-    opcodes = s.device.opcodes
-    s.persistentreserveout(opcodes.PERSISTENT_RESERVE_OUT.serviceaction.RELEASE,
-                           scope=scope,
-                           pr_type=pr_type,
-                           **kwargs)
-
-
-@contextlib.contextmanager
-def _pr_registration(s, key):
-    _pr_register_key(s, key)
-    try:
-        yield
-    finally:
-        _pr_unregister_key(s, key)
-        # There is room for improvement here wrt SPC-5 5.14.11.2.3, but not urgent as
-        # we are hygienic wrt releasing reservations before unregistering keys
-
-
-@contextlib.contextmanager
-def _pr_reservation(s, pr_type, scope=LU_SCOPE, other_connections=[], **kwargs):
-    assert s not in other_connections, "Invalid parameter mix"
-    _pr_reserve(s, pr_type, scope, **kwargs)
-    try:
-        yield
-    finally:
-        _pr_release(s, pr_type, scope, **kwargs)
-        # Do processing as specified by SPC-5 5.14.11.2.2 Releasing
-        # For the time being we will ignore the NUAR bit from SPC-5 7.5.11 Control mode page
-        if pr_type in [PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY,
-                       PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY,
-                       PR_TYPE.WRITE_EXCLUSIVE_ALL_REGISTRANTS,
-                       PR_TYPE.EXCLUSIVE_ACCESS_ALL_REGISTRANTS]:
-            sleep(5)
-            for s2 in other_connections:
-                expect_check_condition(s2, sense_ascq_dict[0x2A04])  # "RESERVATIONS RELEASED"
-
-
-@skip_persistent_reservations
-@pytest.mark.dependency(name="iscsi_basic_persistent_reservation")
-def test_17_basic_persistent_reservation(request):
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator_portal() as config:
-        with configured_target_to_zvol_extent(config, target_name, zvol):
-            iqn = f'{basename}:{target_name}'
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                TUR(s)
-
-                _pr_check_registered_keys(s, [])
-                _pr_check_reservation(s)
-
-                with _pr_registration(s, PR_KEY1):
-                    _pr_check_registered_keys(s, [PR_KEY1])
-                    _pr_check_reservation(s)
-
-                    with _pr_reservation(s, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1):
-                        _pr_check_registered_keys(s, [PR_KEY1])
-                        _pr_check_reservation(s, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-
-                    _pr_check_registered_keys(s, [PR_KEY1])
-                    _pr_check_reservation(s)
-
-                _pr_check_registered_keys(s, [])
-                _pr_check_reservation(s)
-
-
-@contextlib.contextmanager
-def _pr_expect_reservation_conflict(s):
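-    """Assert that the enclosed SCSI command fails with a RESERVATION CONFLICT."""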
-    try:
-        yield
-        assert False, "Failed to get expected PERSISTENT CONFLICT"
-    except Exception as e:
-        if e.__class__.__name__ != str(CheckType.RESERVATION_CONFLICT):
-            raise e
-
-
-def _check_target_rw_paths(s1, s2):
-    """
-    Check that the two supplied paths can read/write data, and they point at the same LUN.
-    """
-    zeros = bytearray(512)
-    deadbeef = bytearray.fromhex('deadbeef') * 128
-    abba = bytearray.fromhex('abbaabba') * 128
-
-    # First let's write zeros to the first 12 blocks using WRITE SAME (16)
-    s1.writesame16(0, 12, zeros)
-
-    # Check results using READ (16)
-    for s in (s1, s2):
-        for lba in range(0, 12):
-            r = s.read16(lba, 1)
-            assert r.datain == zeros, r.datain
-
-    # Update some blocks from each initiator using WRITE SAME
-    s1.writesame16(0, 6, deadbeef)
-    s2.writesame16(6, 6, abba)
-
-    # Check results using READ (16)
-    for s in (s1, s2):
-        for lba in range(0, 6):
-            r = s.read16(lba, 1)
-            assert r.datain == deadbeef, r.datain
-        for lba in range(6, 12):
-            r = s.read16(lba, 1)
-            assert r.datain == abba, r.datain
-
-
-def _check_persistent_reservations(s1, s2):
-    #
-    # First just do some basic tests (register key, reserve, release, unregister key)
-    #
-    _pr_check_registered_keys(s1, [])
-    _pr_check_reservation(s1)
-    _pr_check_registered_keys(s2, [])
-    _pr_check_reservation(s2)
-
-    with _pr_registration(s1, PR_KEY1):
-        _pr_check_registered_keys(s1, [PR_KEY1])
-        _pr_check_reservation(s1)
-        _pr_check_registered_keys(s2, [PR_KEY1])
-        _pr_check_reservation(s2)
-
-        with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
-            _pr_check_registered_keys(s1, [PR_KEY1])
-            _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-            _pr_check_registered_keys(s2, [PR_KEY1])
-            _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-
-        _pr_check_registered_keys(s1, [PR_KEY1])
-        _pr_check_reservation(s1)
-        _pr_check_registered_keys(s2, [PR_KEY1])
-        _pr_check_reservation(s2)
-
-        with _pr_registration(s2, PR_KEY2):
-            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s1)
-            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s2)
-
-            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
-                _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
-                _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-                _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
-                _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-
-            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s1)
-            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s2)
-
-            with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY2, other_connections=[s1]):
-                _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
-                _pr_check_reservation(s1, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY})
-                _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
-                _pr_check_reservation(s2, {'reservation_key': PR_KEY2, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY})
-
-            _pr_check_registered_keys(s1, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s1)
-            _pr_check_registered_keys(s2, [PR_KEY1, PR_KEY2])
-            _pr_check_reservation(s2)
-
-        _pr_check_registered_keys(s1, [PR_KEY1])
-        _pr_check_reservation(s1)
-        _pr_check_registered_keys(s2, [PR_KEY1])
-        _pr_check_reservation(s2)
-
-    _pr_check_registered_keys(s1, [])
-    _pr_check_reservation(s1)
-    _pr_check_registered_keys(s2, [])
-    _pr_check_reservation(s2)
-
-    #
-    # Now let's fail some stuff
-    # See:
-    # - SPC-5 5.14 Table 66
-    # - SBC-4 4.17 Table 13
-    #
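-    # (Those tables summarise which commands conflict for each reservation type:
-    # with WRITE EXCLUSIVE held by one initiator, other initiators may still read
-    # but their writes fail with RESERVATION CONFLICT; EXCLUSIVE ACCESS blocks
-    # their reads as well; the REGISTRANTS ONLY variants exempt initiators that
-    # hold a registered key.)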
-    zeros = bytearray(512)
-    dancing_queen = bytearray.fromhex('00abba00') * 128
-    deadbeef = bytearray.fromhex('deadbeef') * 128
-    with _pr_registration(s1, PR_KEY1):
-        with _pr_registration(s2, PR_KEY2):
-
-            # With registrations only, both initiators can write
-            s1.write16(0, 1, deadbeef)
-            s2.write16(1, 1, dancing_queen)
-            r = s1.read16(1, 1)
-            assert r.datain == dancing_queen, r.datain
-            r = s2.read16(0, 1)
-            assert r.datain == deadbeef, r.datain
-
-            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
-                s1.writesame16(0, 2, zeros)
-                r = s2.read16(0, 2)
-                assert r.datain == zeros + zeros, r.datain
-
-                with _pr_expect_reservation_conflict(s2):
-                    s2.write16(1, 1, dancing_queen)
-
-                r = s2.read16(0, 2)
-                assert r.datain == zeros + zeros, r.datain
-
-                with _pr_expect_reservation_conflict(s2):
-                    with _pr_reservation(s2, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY2):
-                        pass
-
-            with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS, reservation_key=PR_KEY1, other_connections=[s2]):
-                with _pr_expect_reservation_conflict(s2):
-                    r = s2.read16(0, 2)
-                    assert r.datain == zeros + zeros, r.datain
-
-            with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1, other_connections=[s2]):
-                r = s2.read16(0, 2)
-                assert r.datain == zeros + zeros, r.datain
-
-        # s2 is no longer registered
-        with _pr_reservation(s1, PR_TYPE.EXCLUSIVE_ACCESS_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
-            with _pr_expect_reservation_conflict(s2):
-                r = s2.read16(0, 2)
-                assert r.datain == zeros + zeros, r.datain
-
-        with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE_REGISTRANTS_ONLY, reservation_key=PR_KEY1):
-            r = s2.read16(0, 2)
-            assert r.datain == zeros + zeros, r.datain
-
-
-@skip_persistent_reservations
-@skip_multi_initiator
-def test_18_persistent_reservation_two_initiators(request):
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    with initiator_portal() as config:
-        with configured_target_to_zvol_extent(config, target_name, zvol):
-            iqn = f'{basename}:{target_name}'
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s1:
-                TUR(s1)
-                initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
-                with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_name2) as s2:
-                    TUR(s2)
-                    _check_persistent_reservations(s1, s2)
-
-
-def _get_node(timeout=None):
-    return call('failover.node')
-
-
-def _get_ha_failover_status():
-    # Make sure we're talking to the master
-    return call('failover.status')
-
-
-def _get_ha_remote_failover_status():
-    return call('failover.call_remote', 'failover.status')
-
-
-def _get_ha_failover_in_progress():
-    # Make sure we're talking to the master
-    return call('failover.in_progress')
-
-
-def _check_master():
-    status = _get_ha_failover_status()
-    assert status == 'MASTER'
-
-
-def _check_ha_node_configuration():
-    both_nodes = ['A', 'B']
-    # Let's perform some sanity checking wrt controller and IP address
-    # First get node and calculate othernode
-    node = _get_node()
-    assert node in both_nodes
-    _check_master()
-
-    # Now let's get IPs and ensure that
-    # - Node A has truenas_server.nodea_ip
-    # - Node B has truenas_server.nodeb_ip
-    # We will need this later when we start checking TPG, etc
-    ips = {}
-    for anode in both_nodes:
-        ips[anode] = set()
-        if anode == node:
-            interfaces = call('interface.query')
-        else:
-            interfaces = call('failover.call_remote', 'interface.query')
-
-        for i in interfaces:
-            for alias in i['state']['aliases']:
-                if alias.get('type') == 'INET':
-                    ips[anode].add(alias['address'])
-    # Ensure that truenas_server.nodea_ip and truenas_server.nodeb_ip are what we expect
-    assert truenas_server.nodea_ip in ips['A']
-    assert truenas_server.nodea_ip not in ips['B']
-    assert truenas_server.nodeb_ip in ips['B']
-    assert truenas_server.nodeb_ip not in ips['A']
-
-
-def _verify_ha_report_target_port_groups(s, tpgs, active_tpg):
-    """
-    Verify that the REPORT TARGET PORT GROUPS command returns the expected
-    results.
-    """
-    x = s.reporttargetportgroups()
-    for tpg_desc in x.result['target_port_group_descriptors']:
-        tpg_id = tpg_desc['target_port_group']
-        ids = set([x['relative_target_port_id'] for x in tpg_desc['target_ports']])
-        assert ids == set(tpgs[tpg_id]), ids
-        # See SPC-5 6.36 REPORT TARGET PORT GROUPS
-        # Active/Optimized is 0
-        # Active/Non-optimized is 1
-        if tpg_id == active_tpg:
-            assert tpg_desc['asymmetric_access_state'] == 0, tpg_desc
-        else:
-            assert tpg_desc['asymmetric_access_state'] == 1, tpg_desc
-
-
-def _get_active_target_portal_group():
-    _check_master()
-    node = _get_node()
-    if node == 'A':
-        return CONTROLLER_A_TARGET_PORT_GROUP_ID
-    elif node == 'B':
-        return CONTROLLER_B_TARGET_PORT_GROUP_ID
-    return None
-
-
-def _wait_for_alua_settle(retries=20):
-    print("Checking ALUA status...")
-    while retries:
-        if call('iscsi.alua.settled'):
-            print("ALUA is settled")
-            break
-        retries -= 1
-        print("Waiting for ALUA to settle")
-        sleep(5)
-
-
-def _ha_reboot_master(delay=900):
-    """
-    Reboot the MASTER node and wait for both the new MASTER
-    and new BACKUP to become available.
-    """
-    get_node_timeout = 20
-    orig_master_node = _get_node()
-    new_master_node = other_node(orig_master_node)
-
-    call('system.reboot', 'iSCSI test')
-
-    # First we'll loop until the node is no longer the orig_node
-    new_master = False
-    while not new_master:
-        try:
-            # There are times when we don't get a response at all (at least
-            # on a bhyve HA-VM pair), so add a timeout to catch this situation.
-            if _get_node(timeout=get_node_timeout) == new_master_node:
-                new_master = True
-                break
-        except requests.exceptions.Timeout:
-            delay = delay - get_node_timeout
-        except Exception:
-            delay = delay - 1
-        else:
-            # Still seeing the old MASTER; keep counting down the overall timeout.
-            delay = delay - 1
-        if delay <= 0:
-            break
-        print("Waiting for MASTER")
-        sleep(1)
-
-    if not new_master:
-        raise RuntimeError('Did not switch to new controller.')
-
-    # OK, we're on the new master, now wait for the other controller
-    # to become BACKUP.
-    new_backup = False
-    while not new_backup:
-        try:
-            if _get_ha_remote_failover_status() == 'BACKUP':
-                new_backup = True
-                break
-        except Exception:
-            pass
-        delay = delay - 5
-        if delay <= 0:
-            break
-        print("Waiting for BACKUP")
-        sleep(5)
-
-    if not new_backup:
-        raise RuntimeError('Backup controller did not surface.')
-
-    # Ensure that a failover is still not in progress
-    in_progress = True
-    while in_progress:
-        try:
-            in_progress = _get_ha_failover_in_progress()
-            if not in_progress:
-                break
-        except Exception:
-            pass
-        delay = delay - 5
-        if delay <= 0:
-            break
-        print("Waiting while in progress")
-        sleep(5)
-
-    if in_progress:
-        raise RuntimeError('Failover never completed.')
-
-    # Finally check the ALUA status
-    _wait_for_alua_settle()
-
-
-def _ensure_alua_state(state):
-    results = call('iscsi.global.config')
-    assert results['alua'] == state, results
-
-
-@pytest.mark.dependency(name="iscsi_alua_config")
-@pytest.mark.timeout(900)
-def test_19_alua_config(request):
-    """
-    Test various aspects of ALUA configuration.
-
-    When run against a HA system this test will perform TWO reboots to
-    test failover wrt iSCSI ALUA targets.
-
-    The second reboot was added to return the system to the original ACTIVE
-    node.  This means that subsequent tests will run on the same node that
-    the previous tests started on, thereby simplifying log analysis.
-    """
-    # First ensure ALUA is off
-    _ensure_alua_state(False)
-
-    if ha:
-        _check_ha_node_configuration()
-
-    # Next create a target
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config,
-                                              target_name,
-                                              pool_name,
-                                              dataset_name,
-                                              file_name
-                                              ) as iscsi_config:
-            # Login to the target and ensure that things look reasonable.
-            iqn = f'{basename}:{target_name}'
-            api_serial_number = iscsi_config['extent']['serial']
-            api_naa = iscsi_config['extent']['naa']
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                verify_ha_inquiry(s, api_serial_number, api_naa)
-
-            if ha:
-                # Only perform this section on a HA system
-
-                with alua_enabled():
-                    _ensure_alua_state(True)
-                    _wait_for_alua_settle()
-
-                    # We will login to the target on BOTH controllers and make sure
-                    # we see the same target.  Observe that we supply tpgs=1 as
-                    # part of the check
-                    with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
-                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
-                        with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
-                            verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
-
-                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-
-                            tpgs = {
-                                CONTROLLER_A_TARGET_PORT_GROUP_ID: [1],
-                                CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001]
-                            }
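-                            # Relative target port ids are 1, 2, ... on controller A and
-                            # 32001, 32002, ... on controller B, i.e. the B-side ids are
-                            # offset by 32000.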
-                            active_tpg = _get_active_target_portal_group()
-                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
-                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
-
-                # Ensure ALUA is off again
-                _ensure_alua_state(False)
-
-        # At this point we have no targets and ALUA is off
-        if ha:
-            # Now turn on ALUA again
-            with alua_enabled():
-                _ensure_alua_state(True)
-
-                # Then create a target (with ALUA already enabled)
-                with configured_target_to_file_extent(config,
-                                                      target_name,
-                                                      pool_name,
-                                                      dataset_name,
-                                                      file_name
-                                                      ) as iscsi_config:
-                    iqn = f'{basename}:{target_name}'
-                    api_serial_number = iscsi_config['extent']['serial']
-                    api_naa = iscsi_config['extent']['naa']
-                    # Login to the target and ensure that things look reasonable.
-                    with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
-                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
-
-                        with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
-                            verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
-
-                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-
-                            # Use the tpgs & active_tpg from above
-                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
-                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
-
-                            _check_target_rw_paths(s1, s2)
-
-                            # Let's failover
-                            _ha_reboot_master()
-                            expect_check_condition(s1, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-                            expect_check_condition(s2, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-
-                            _check_ha_node_configuration()
-                            new_active_tpg = _get_active_target_portal_group()
-                            assert new_active_tpg != active_tpg
-
-                            verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                            verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-
-                            _verify_ha_report_target_port_groups(s1, tpgs, new_active_tpg)
-                            _verify_ha_report_target_port_groups(s2, tpgs, new_active_tpg)
-
-                            _check_target_rw_paths(s1, s2)
-
-                            # Create a new target
-                            with configured_target_to_zvol_extent(config, f'{target_name}b', zvol) as iscsi_config2:
-                                iqn2 = f'{basename}:{target_name}b'
-                                api_serial_number2 = iscsi_config2['extent']['serial']
-                                api_naa2 = iscsi_config2['extent']['naa']
-                                tpgs2 = {
-                                    CONTROLLER_A_TARGET_PORT_GROUP_ID: [1, 2],
-                                    CONTROLLER_B_TARGET_PORT_GROUP_ID: [32001, 32002]
-                                }
-                                # Wait until ALUA settles, so that we know the target is available on the STANDBY node.
-                                _wait_for_alua_settle()
-                                # Login to the target on each controller
-                                with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as s3:
-                                    verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
-                                    initiator_name3 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:third"
-                                    with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2, initiator_name=initiator_name3) as s4:
-                                        verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
-                                        verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                                        verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-                                        _verify_ha_report_target_port_groups(s3, tpgs2, new_active_tpg)
-                                        _verify_ha_report_target_port_groups(s4, tpgs2, new_active_tpg)
-                                        _check_target_rw_paths(s3, s4)
-
-                                        # Reboot again (to failback to the original ACTIVE node)
-                                        _ha_reboot_master()
-                                        for s in [s1, s2, s3, s4]:
-                                            expect_check_condition(s, sense_ascq_dict[0x2900])  # "POWER ON, RESET, OR BUS DEVICE RESET OCCURRED"
-
-                                        # After the 2nd reboot we will switch back to using the original active_tpg
-
-                                        # Check the new target again
-                                        verify_ha_inquiry(s3, api_serial_number2, api_naa2, 1)
-                                        verify_ha_inquiry(s4, api_serial_number2, api_naa2, 1)
-                                        verify_ha_device_identification(s3, api_naa2, 2, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                                        verify_ha_device_identification(s4, api_naa2, 32002, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-                                        _verify_ha_report_target_port_groups(s3, tpgs2, active_tpg)
-                                        _verify_ha_report_target_port_groups(s4, tpgs2, active_tpg)
-                                        _check_target_rw_paths(s3, s4)
-
-                                        # Check the original target
-                                        verify_ha_inquiry(s1, api_serial_number, api_naa, 1)
-                                        verify_ha_inquiry(s2, api_serial_number, api_naa, 1)
-                                        verify_ha_device_identification(s1, api_naa, 1, CONTROLLER_A_TARGET_PORT_GROUP_ID)
-                                        verify_ha_device_identification(s2, api_naa, 32001, CONTROLLER_B_TARGET_PORT_GROUP_ID)
-                                        _verify_ha_report_target_port_groups(s1, tpgs2, active_tpg)
-                                        _verify_ha_report_target_port_groups(s2, tpgs2, active_tpg)
-                                        _check_target_rw_paths(s1, s2)
-                            # Second target has been removed again
-                            _wait_for_alua_settle()
-                            _verify_ha_report_target_port_groups(s1, tpgs, active_tpg)
-                            _verify_ha_report_target_port_groups(s2, tpgs, active_tpg)
-
-            # Ensure ALUA is off again
-            _ensure_alua_state(False)
-
-
-@skip_persistent_reservations
-@skip_multi_initiator
-@skip_ha_tests
-def test_20_alua_basic_persistent_reservation(request):
-    # Don't need to specify "iscsi_cmd_00" here
-    depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session")
-    # Turn on ALUA
-    with alua_enabled():
-        with initiator_portal() as config:
-            with configured_target_to_file_extent(config, target_name, pool_name, dataset_name, file_name):
-                iqn = f'{basename}:{target_name}'
-                # Login to the target on each controller
-                with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
-                    with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s2:
-                        # Now we can do some basic tests
-                        _pr_check_registered_keys(s1, [])
-                        _pr_check_registered_keys(s2, [])
-                        _pr_check_reservation(s1)
-                        _pr_check_reservation(s2)
-
-                        with _pr_registration(s1, PR_KEY1):
-                            _pr_check_registered_keys(s1, [PR_KEY1])
-                            _pr_check_registered_keys(s2, [PR_KEY1])
-                            _pr_check_reservation(s1)
-                            _pr_check_reservation(s2)
-
-                            with _pr_reservation(s1, PR_TYPE.WRITE_EXCLUSIVE, reservation_key=PR_KEY1, other_connections=[s2]):
-                                _pr_check_registered_keys(s1, [PR_KEY1])
-                                _pr_check_registered_keys(s2, [PR_KEY1])
-                                _pr_check_reservation(s1, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-                                _pr_check_reservation(s2, {'reservation_key': PR_KEY1, 'scope': LU_SCOPE, 'type': PR_TYPE.WRITE_EXCLUSIVE})
-
-                            _pr_check_registered_keys(s1, [PR_KEY1])
-                            _pr_check_registered_keys(s2, [PR_KEY1])
-                            _pr_check_reservation(s1)
-                            _pr_check_reservation(s2)
-
-                        _pr_check_registered_keys(s1, [])
-                        _pr_check_registered_keys(s2, [])
-                        _pr_check_reservation(s1)
-                        _pr_check_reservation(s2)
-
-    # Ensure ALUA is off again
-    _ensure_alua_state(False)
-
-
-@skip_persistent_reservations
-@skip_multi_initiator
-@skip_ha_tests
-def test_21_alua_persistent_reservation_two_initiators(request):
-    depends(request, ["iscsi_alua_config", "iscsi_basic_persistent_reservation"], scope="session")
-    with alua_enabled():
-        with initiator_portal() as config:
-            with configured_target_to_zvol_extent(config, target_name, zvol):
-                iqn = f'{basename}:{target_name}'
-                # Login to the target on each controller
-                with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s1:
-                    TUR(s1)
-                    initiator_name2 = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}:second"
-                    with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, initiator_name=initiator_name2) as s2:
-                        TUR(s2)
-                        _check_persistent_reservations(s1, s2)
-                        # Do it all again, the other way around
-                        _check_persistent_reservations(s2, s1)
-
-
-def _get_designator(s, designator_type):
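-    """
-    Return the designation descriptor of the requested type (e.g. 3 => NAA) from
-    the Device Identification VPD page (0x83).  The 'piv' field is dropped so the
-    remaining fields can be passed straight to extendedcopy4 as identification
-    target descriptor parameters.
-    """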
-    x = s.inquiry(evpd=1, page_code=0x83)
-    for designator in x.result["designator_descriptors"]:
-        if designator["designator_type"] == designator_type:
-            del designator["piv"]
-            return designator
-
-
-def _xcopy_test(s1, s2, adds1=None, adds2=None):
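-    """
-    Use EXTENDED COPY to copy a range of blocks from the LUN behind s1 to the LUN
-    behind s2, then verify the data.  adds1/adds2 are optional additional
-    connections to the same two LUNs (e.g. via the other ALUA controller) that
-    are verified as well.
-    """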
-    zeros = bytearray(512)
-    deadbeef = bytearray.fromhex("deadbeef") * 128
-
-    def validate_blocks(s, start, end, beefy_list):
-        for lba in range(start, end):
-            r = s.read16(lba, 1)
-            if lba in beefy_list:
-                assert r.datain == deadbeef, r.datain
-            else:
-                assert r.datain == zeros, r.datain
-
-    d1 = _get_designator(s1, 3)
-    d2 = _get_designator(s2, 3)
-
-    # First let's write zeros to the first 20 blocks using WRITE SAME (16)
-    s1.writesame16(0, 20, zeros)
-    s2.writesame16(0, 20, zeros)
-
-    # Write some deadbeef
-    s1.write16(1, 1, deadbeef)
-    s1.write16(3, 1, deadbeef)
-    s1.write16(4, 1, deadbeef)
-
-    # Check that the blocks were written correctly
-    validate_blocks(s1, 0, 20, [1, 3, 4])
-    validate_blocks(s2, 0, 20, [])
-    if adds1:
-        validate_blocks(adds1, 0, 20, [1, 3, 4])
-    if adds2:
-        validate_blocks(adds2, 0, 20, [])
-
-    # XCOPY
-    s1.extendedcopy4(
-        priority=1,
-        list_identifier=0x34,
-        target_descriptor_list=[
-            {
-                "descriptor_type_code": "Identification descriptor target descriptor",
-                "peripheral_device_type": 0x00,
-                "target_descriptor_parameters": d1,
-                "device_type_specific_parameters": {"disk_block_length": 512},
-            },
-            {
-                "descriptor_type_code": "Identification descriptor target descriptor",
-                "peripheral_device_type": 0x00,
-                "target_descriptor_parameters": d2,
-                "device_type_specific_parameters": {"disk_block_length": 512},
-            },
-        ],
-        segment_descriptor_list=[
-            {
-                "descriptor_type_code": "Copy from block device to block device",
-                "dc": 1,
-                "source_target_descriptor_id": 0,
-                "destination_target_descriptor_id": 1,
-                "block_device_number_of_blocks": 4,
-                "source_block_device_logical_block_address": 1,
-                "destination_block_device_logical_block_address": 10,
-            }
-        ],
-    )
-
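-    # The segment descriptor copied 4 blocks from source LBA 1..4 to destination
-    # LBA 10..13, so destination blocks 10, 12 and 13 now hold the deadbeef pattern
-    # while block 11 (copied from the all-zero source block 2) remains zero.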
-    validate_blocks(s1, 0, 20, [1, 3, 4])
-    validate_blocks(s2, 0, 20, [10, 12, 13])
-    if adds1:
-        validate_blocks(adds1, 0, 20, [1, 3, 4])
-    if adds2:
-        validate_blocks(adds2, 0, 20, [10, 12, 13])
-
-
-@pytest.mark.parametrize('extent2', ["FILE", "VOLUME"])
-@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"])
-def test_22_extended_copy(request, extent1, extent2):
-    # print(f"Extended copy {extent1} -> {extent2}")
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    name1 = f"{target_name}x1"
-    name2 = f"{target_name}x2"
-    iqn1 = f'{basename}:{name1}'
-    iqn2 = f'{basename}:{name2}'
-
-    with initiator_portal() as config:
-        with configured_target(config, name1, extent1):
-            with configured_target(config, name2, extent2):
-                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
-                    with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
-                        s1.testunitready()
-                        s2.testunitready()
-                        _xcopy_test(s1, s2)
-
-
-@skip_ha_tests
-@pytest.mark.parametrize('extent2', ["FILE", "VOLUME"])
-@pytest.mark.parametrize('extent1', ["FILE", "VOLUME"])
-def test_23_ha_extended_copy(request, extent1, extent2):
-    depends(request, ["iscsi_alua_config"], scope="session")
-
-    name1 = f"{target_name}x1"
-    name2 = f"{target_name}x2"
-    iqn1 = f'{basename}:{name1}'
-    iqn2 = f'{basename}:{name2}'
-
-    with alua_enabled():
-        with initiator_portal() as config:
-            with configured_target(config, name1, extent1):
-                with configured_target(config, name2, extent2):
-                    with iscsi_scsi_connection(truenas_server.nodea_ip, iqn1) as sa1:
-                        with iscsi_scsi_connection(truenas_server.nodea_ip, iqn2) as sa2:
-                            with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn1) as sb1:
-                                with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn2) as sb2:
-                                    sa1.testunitready()
-                                    sa2.testunitready()
-                                    sb1.testunitready()
-                                    sb2.testunitready()
-                                    _xcopy_test(sa1, sa2, sb1, sb2)
-                                    # Now re-run the test using the other controller
-                                    _xcopy_test(sb1, sb2, sa1, sa2)
-
-
-def test_24_iscsi_target_disk_login(request):
-    """
-    Tests whether a logged in iSCSI target shows up in disks.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-    iqn = f'{basename}:{target_name}'
-
-    def fetch_disk_data(fetch_remote=False):
-        data = {}
-        if fetch_remote:
-            data['failover.get_disks_local'] = set(call('failover.call_remote', 'failover.get_disks_local'))
-            data['disk.get_unused'] = set([d['devname'] for d in call('failover.call_remote', 'disk.get_unused')])
-        else:
-            data['failover.get_disks_local'] = set(call('failover.get_disks_local'))
-            data['disk.get_unused'] = set([d['devname'] for d in call('disk.get_unused')])
-        return data
-
-    def check_disk_data(old, new, whenstr, internode_check=False):
-        # There are some items that we can't compare between 2 HA nodes
-        SINGLE_NODE_COMPARE_ONLY = ['disk.get_unused']
-        for key in old:
-            if internode_check and key in SINGLE_NODE_COMPARE_ONLY:
-                continue
-            assert old[key] == new[key], f"{key} does not match {whenstr}: {old[key]} {new[key]}"
-
-    if ha:
-        # In HA we will create an ALUA target and check the STANDBY node
-        data_before_l = fetch_disk_data()
-        data_before_r = fetch_disk_data(True)
-        check_disk_data(data_before_l, data_before_r, "initially", True)
-        with alua_enabled():
-            with initiator_portal() as config:
-                with configured_target_to_zvol_extent(config, target_name, zvol):
-                    sleep(5)
-                    data_after_l = fetch_disk_data()
-                    data_after_r = fetch_disk_data(True)
-                    check_disk_data(data_before_l, data_after_l, "after iSCSI ALUA target creation (Active)")
-                    check_disk_data(data_before_r, data_after_r, "after iSCSI ALUA target creation (Standby)")
-    else:
-        # In non-HA we will create a target and login to it from the same TrueNAS system
-        # Just in case the IP was supplied as a hostname, use the resolved actual_ip
-        actual_ip = get_ip_addr(truenas_server.ip)
-        data_before = fetch_disk_data()
-        with initiator_portal() as config:
-            with configured_target_to_zvol_extent(config, target_name, zvol):
-                data_after = fetch_disk_data()
-                check_disk_data(data_before, data_after, "after iSCSI target creation")
-
-                # Discover the target (loopback)
-                results = SSH_TEST(f"iscsiadm -m discovery -t st -p {actual_ip}", user, password)
-                assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
-                # Make SURE we find the target at the ip we expect
-                found_iqn = False
-                for line in results['stdout'].split('\n'):
-                    if not line.startswith(f'{actual_ip}:'):
-                        continue
-                    if line.split()[1] == iqn:
-                        found_iqn = True
-                assert found_iqn, f'Failed to find IQN {iqn}: out: {results["output"]}'
-
-                # Login the target
-                results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --login", user, password)
-                assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
-                # Allow some time for the disk to surface
-                sleep(5)
-                # Then check that everything looks OK
-                try:
-                    data_after = fetch_disk_data()
-                    check_disk_data(data_before, data_after, "after iSCSI target login")
-                finally:
-                    results = SSH_TEST(f"iscsiadm -m node -T {iqn} -p {actual_ip}:3260 --logout", user, password)
-                    assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
-
-
-def test_25_resize_target_zvol(request):
-    """
-    Verify that an iSCSI client is notified when the size of a ZVOL underlying
-    an iSCSI extent is modified.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    with initiator_portal() as config:
-        with configured_target_to_zvol_extent(config, target_name, zvol, volsize=MB_100) as config:
-            iqn = f'{basename}:{target_name}'
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                TUR(s)
-                assert MB_100 == read_capacity16(s)
-                # We have checked using tcpdump/wireshark that a SCSI Asynchronous Event
-                # Notification (0x2A09: "CAPACITY DATA HAS CHANGED") gets sent.
-                zvol_resize(zvol, MB_256)
-                assert MB_256 == read_capacity16(s)
-                # But we can do better (in terms of test) ... turn AEN off,
-                # which means we will get a CHECK CONDITION on the next resize
-                SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password)
-                zvol_resize(zvol, MB_512)
-                expect_check_condition(s, sense_ascq_dict[0x2A09])  # "CAPACITY DATA HAS CHANGED"
-                assert MB_512 == read_capacity16(s)
-                # Try to shrink the ZVOL again.  Expect an error
-                with pytest.raises(ValidationErrors):
-                    zvol_resize(zvol, MB_256)
-                assert MB_512 == read_capacity16(s)
-
-
-def test_26_resize_target_file(request):
-    """
-    Verify that an iSCSI client is notified when the size of a file-based
-    iSCSI extent is modified.
-    """
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    with initiator_portal() as config:
-        with configured_target_to_file_extent(config,
-                                              target_name,
-                                              pool_name,
-                                              dataset_name,
-                                              file_name,
-                                              filesize=MB_100) as config:
-            iqn = f'{basename}:{target_name}'
-            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                extent_id = config['extent']['id']
-                TUR(s)
-                assert MB_100 == read_capacity16(s)
-                file_extent_resize(extent_id, MB_256)
-                assert MB_256 == read_capacity16(s)
-                # Turn AEN off so that we will get a CHECK CONDITION on the next resize
-                SSH_TEST(f"echo 1 > /sys/kernel/scst_tgt/targets/iscsi/{iqn}/aen_disabled", user, password)
-                file_extent_resize(extent_id, MB_512)
-                expect_check_condition(s, sense_ascq_dict[0x2A09])  # "CAPACITY DATA HAS CHANGED"
-                assert MB_512 == read_capacity16(s)
-                # Try to shrink the file again.  Expect an error
-                with pytest.raises(ValidationErrors):
-                    file_extent_resize(extent_id, MB_256)
-                assert MB_512 == read_capacity16(s)
-
-
-@skip_multi_initiator
-def test_27_initiator_group(request):
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    initiator_base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}"
-    initiator_iqn1 = f"{initiator_base}:one"
-    initiator_iqn2 = f"{initiator_base}:two"
-    initiator_iqn3 = f"{initiator_base}:three"
-
-    # First create a target without an initiator group specified
-    with initiator_portal() as config1:
-        with configured_target_to_zvol_extent(config1, target_name, zvol) as config:
-            iqn = f'{basename}:{target_name}'
-
-            # Ensure we can access from all initiators
-            for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
-                with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
-                    TUR(s)
-
-            # Now set the initiator id to the empty (Allow All Initiators) one
-            # that we created above.  Then ensure we can still access the
-            # target from all initiators.
-            set_target_initiator_id(config['target']['id'], config['initiator']['id'])
-            for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
-                with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
-                    TUR(s)
-
-            # Now create another initiator group, which contains the first two
-            # initiators only and modify the target to use it
-            with initiator("two initiators only", [initiator_iqn1, initiator_iqn2]) as twoinit_config:
-                set_target_initiator_id(config['target']['id'], twoinit_config['id'])
-                # First two initiators can connect to the target
-                for initiator_iqn in [initiator_iqn1, initiator_iqn2]:
-                    with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
-                        TUR(s)
-                # Third initiator cannot connect to the target
-                with pytest.raises(RuntimeError) as ve:
-                    with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn3) as s:
-                        TUR(s)
-                assert 'Unable to connect to' in str(ve), ve
-                # Clear it again
-                set_target_initiator_id(config['target']['id'], None)
-
-            for initiator_iqn in [initiator_iqn1, initiator_iqn2, initiator_iqn3]:
-                with iscsi_scsi_connection(truenas_server.ip, iqn, initiator_name=initiator_iqn) as s:
-                    TUR(s)
-
-
-def test_28_portal_access(request):
-    """
-    Verify that an iSCSI client can access a target on the specified
-    portal.
-
-    For a HA ALUA target, check the constituent interfaces.
-    """
-    iqn = f'{basename}:{target_name}'
-    with initiator() as initiator_config:
-        with portal(listen=[{'ip': get_ip_addr(truenas_server.ip)}]) as portal_config:
-            config1 = {'initiator': initiator_config, 'portal': portal_config}
-            with configured_target_to_zvol_extent(config1, target_name, zvol, volsize=MB_100):
-                with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                    TUR(s)
-                    assert MB_100 == read_capacity16(s)
-                # Now, if we are in a HA config turn on ALUA and test
-                # the specific IP addresses
-                if ha:
-                    with alua_enabled():
-                        _ensure_alua_state(True)
-
-                        with pytest.raises(RuntimeError) as ve:
-                            with iscsi_scsi_connection(truenas_server.ip, iqn) as s:
-                                TUR(s)
-                        assert 'Unable to connect to' in str(ve), ve
-
-                        with iscsi_scsi_connection(truenas_server.nodea_ip, iqn) as s:
-                            TUR(s)
-                            assert MB_100 == read_capacity16(s)
-
-                        with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn) as s:
-                            TUR(s)
-                            assert MB_100 == read_capacity16(s)
-
-
-def test_29_multiple_extents():
-    """
-    Verify that an iSCSI client can access multiple target LUNs
-    when multiple extents are configured.
-
-    Also validate that an extent serial number cannot be reused, and
-    that supplying an empty string serial number means one gets
-    generated.
-    """
-    iqn = f'{basename}:{target_name}'
-    with initiator_portal() as config:
-        portal_id = config['portal']['id']
-        with target(target_name, [{'portal': portal_id}]) as target_config:
-            target_id = target_config['id']
-            with dataset(dataset_name):
-                with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as extent1_config:
-                    with file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as extent2_config:
-                        with target_extent_associate(target_id, extent1_config['id'], 0):
-                            with target_extent_associate(target_id, extent2_config['id'], 1):
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 0) as s:
-                                    TUR(s)
-                                    assert MB_100 == read_capacity16(s)
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 1) as s:
-                                    TUR(s)
-                                    assert MB_256 == read_capacity16(s)
-
-                                # Now try to create another extent using the same serial number
-                                # We expect this to fail.
-                                with pytest.raises(ValidationErrors) as ve:
-                                    with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512,
-                                                     extent_name="extent3", serial=extent1_config['serial']):
-                                        pass
-                                assert ve.value.errors == [
-                                    ValidationError('iscsi_extent_create.serial', 'Serial number must be unique', errno.EINVAL)
-                                ]
-
-                                with file_extent(pool_name, dataset_name, "target.extent3", filesize=MB_512,
-                                                 extent_name="extent3", serial='') as extent3_config:
-                                    # We expect this to complete, but generate a serial number
-                                    assert len(extent3_config['serial']) == 15, extent3_config['serial']
-
-
-def check_inq_enabled_state(iqn, expected):
-    """Check the current enabled state of the specified SCST IQN directly from /sys
-    is as expected."""
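-    # The sysfs node holds a single digit: 1 when the IQN is enabled, 0 when it is not.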
-    results = SSH_TEST(f"cat /sys/kernel/scst_tgt/targets/iscsi/{iqn}/enabled", user, password)
-    assert results['result'] is True, f'out: {results["output"]}, err: {results["stderr"]}'
-    for line in results["output"].split('\n'):
-        if line.startswith('Warning: Permanently added'):
-            continue
-        if line:
-            actual = int(line)
-    assert actual == expected, f'IQN {iqn} has an unexpected enabled state - was {actual}, expected {expected}'
-
-
-def test_30_target_without_active_extent(request):
-    """Validate that a target will not be enabled if it does not have
-    any enabled associated extents"""
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    name1 = f"{target_name}x1"
-    name2 = f"{target_name}x2"
-    iqn1 = f'{basename}:{name1}'
-    iqn2 = f'{basename}:{name2}'
-
-    with initiator_portal() as config:
-        with configured_target(config, name1, 'VOLUME') as target1_config:
-            with configured_target(config, name2, 'VOLUME') as target2_config:
-                # OK, we've configured two separate targets, ensure all looks good
-                check_inq_enabled_state(iqn1, 1)
-                check_inq_enabled_state(iqn2, 1)
-                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
-                    TUR(s1)
-                with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
-                    TUR(s2)
-
-                # Disable an extent and ensure things are as expected
-                extent_disable(target2_config['extent']['id'])
-                check_inq_enabled_state(iqn1, 1)
-                check_inq_enabled_state(iqn2, 0)
-                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
-                    TUR(s1)
-                with pytest.raises(RuntimeError) as ve:
-                    with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
-                        TUR(s2)
-                assert 'Unable to connect to' in str(ve), ve
-
-                # Reenable the extent
-                extent_enable(target2_config['extent']['id'])
-                check_inq_enabled_state(iqn1, 1)
-                check_inq_enabled_state(iqn2, 1)
-                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
-                    TUR(s1)
-                with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
-                    TUR(s2)
-
-                # Move the extent from target2 to target1
-                #
-                # We do this by updating the existing association rather
-                # than deleting the old association and creating a new one,
-                # because we want to avoid breaking the yield ... finally cleanup
-                payload = {
-                    'target': target1_config['target']['id'],
-                    'lunid': 1,
-                    'extent': target2_config['extent']['id']
-                }
-                call('iscsi.targetextent.update', target2_config['associate']['id'], payload)
-
-                check_inq_enabled_state(iqn1, 1)
-                check_inq_enabled_state(iqn2, 0)
-                with iscsi_scsi_connection(truenas_server.ip, iqn1) as s1:
-                    TUR(s1)
-                # We should now have a LUN 1
-                with iscsi_scsi_connection(truenas_server.ip, iqn1, 1) as s1b:
-                    TUR(s1b)
-                with pytest.raises(RuntimeError) as ve:
-                    with iscsi_scsi_connection(truenas_server.ip, iqn2) as s2:
-                        TUR(s2)
-                assert 'Unable to connect to' in str(ve), ve
-
-
-def test_31_iscsi_sessions(request):
-    """Validate that we can get a list of currently running iSCSI sessions."""
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    name1 = f"{target_name}x1"
-    name2 = f"{target_name}x2"
-    name3 = f"{target_name}x3"
-    iqn1 = f'{basename}:{name1}'
-    iqn2 = f'{basename}:{name2}'
-    iqn3 = f'{basename}:{name3}'
-    initiator_base = f"iqn.2018-01.org.pyscsi:{socket.gethostname()}"
-    initiator_iqn1 = f"{initiator_base}:one"
-    initiator_iqn2 = f"{initiator_base}:two"
-    initiator_iqn3 = f"{initiator_base}:three"
-
-    with initiator_portal() as config:
-        with configured_target(config, name1, 'VOLUME'):
-            with configured_target(config, name2, 'FILE'):
-                with configured_target(config, name3, 'VOLUME'):
-                    verify_client_count(0)
-                    with iscsi_scsi_connection(truenas_server.ip, iqn1, initiator_name=initiator_iqn1):
-                        verify_client_count(1)
-                        with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn2):
-                            # Client count checks the number of different IPs attached, not sessions
-                            verify_client_count(1)
-                            # Validate that the two sessions are reported correctly
-                            data = get_iscsi_sessions(check_length=2)
-                            for sess in data:
-                                if sess['target'] == iqn1:
-                                    assert sess['initiator'] == initiator_iqn1, data
-                                elif sess['target'] == iqn2:
-                                    assert sess['initiator'] == initiator_iqn2, data
-                                else:
-                                    # Unknown target!
-                                    assert False, data
-                            # Filter by target
-                            data = get_iscsi_sessions([['target', '=', iqn1]], 1)
-                            assert data[0]['initiator'] == initiator_iqn1, data
-                            data = get_iscsi_sessions([['target', '=', iqn2]], 1)
-                            assert data[0]['initiator'] == initiator_iqn2, data
-                            data = get_iscsi_sessions([['target', '=', iqn3]], 0)
-                            # Filter by initiator
-                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 1)
-                            assert data[0]['target'] == iqn1, data
-                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1)
-                            assert data[0]['target'] == iqn2, data
-                            data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0)
-                            # Now login to target2 with initiator1
-                            with iscsi_scsi_connection(truenas_server.ip, iqn2, initiator_name=initiator_iqn1):
-                                verify_client_count(1)
-                                get_iscsi_sessions(check_length=3)
-                                # Filter by target
-                                data = get_iscsi_sessions([['target', '=', iqn1]], 1)
-                                assert data[0]['initiator'] == initiator_iqn1, data
-                                data = get_iscsi_sessions([['target', '=', iqn2]], 2)
-                                assert set([sess['initiator'] for sess in data]) == {initiator_iqn1, initiator_iqn2}, data
-                                data = get_iscsi_sessions([['target', '=', iqn3]], 0)
-                                # Filter by initiator
-                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn1]], 2)
-                                assert set([sess['target'] for sess in data]) == {iqn1, iqn2}, data
-                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn2]], 1)
-                                assert data[0]['target'] == iqn2, data
-                                data = get_iscsi_sessions([['initiator', '=', initiator_iqn3]], 0)
-                            # Logout of target, ensure sessions get updated.
-                            verify_client_count(1)
-                            data = get_iscsi_sessions(check_length=2)
-                            for sess in data:
-                                if sess['target'] == iqn1:
-                                    assert sess['initiator'] == initiator_iqn1, data
-                                elif sess['target'] == iqn2:
-                                    assert sess['initiator'] == initiator_iqn2, data
-                                else:
-                                    # Unknown target!
-                                    assert False, data
-                        # Client count checks the number of different IPs attached, not sessions
-                        verify_client_count(1)
-                        get_iscsi_sessions(check_length=1)
-                    verify_client_count(0)
-                    get_iscsi_sessions(check_length=0)
-
-
-def test_32_multi_lun_targets(request):
-    """Validate that we can create and access multi-LUN targets."""
-    depends(request, ["iscsi_cmd_00"], scope="session")
-
-    name1 = f"{target_name}x1"
-    name2 = f"{target_name}x2"
-    iqn1 = f'{basename}:{name1}'
-    iqn2 = f'{basename}:{name2}'
-
-    def test_target_sizes(ipaddr):
-        with iscsi_scsi_connection(ipaddr, iqn1, 0) as s:
-            verify_capacity(s, MB_100)
-        with iscsi_scsi_connection(ipaddr, iqn1, 1) as s:
-            verify_capacity(s, MB_200)
-        with iscsi_scsi_connection(ipaddr, iqn2, 0) as s:
-            verify_capacity(s, MB_256)
-        with iscsi_scsi_connection(ipaddr, iqn2, 1) as s:
-            verify_capacity(s, MB_512)
-
-    with initiator_portal() as config:
-        with configured_target(config, name1, 'FILE', extent_size=MB_100) as config1:
-            with add_file_extent_target_lun(config1, 1, MB_200):
-                with configured_target(config, name2, 'VOLUME', extent_size=MB_256) as config1:
-                    with add_zvol_extent_target_lun(config1, 1, volsize=MB_512):
-                        # Check that we can connect to each LUN and that it has the expected capacity
-                        test_target_sizes(truenas_server.ip)
-                        if ha:
-                            # Only perform this section on a HA system
-                            with alua_enabled():
-                                test_target_sizes(truenas_server.nodea_ip)
-                                test_target_sizes(truenas_server.nodeb_ip)
-
-
-def test_33_no_lun_zero():
-    """
-    Verify that an iSCSI client can log in to a target that is missing LUN 0 (and LUN 1)
-    and that REPORT LUNS works as expected.
-    """
-    iqn = f'{basename}:{target_name}'
-    with initiator_portal() as config:
-        portal_id = config['portal']['id']
-        with target(target_name, [{'portal': portal_id}]) as target_config:
-            target_id = target_config['id']
-            with dataset(dataset_name):
-                with file_extent(pool_name, dataset_name, "target.extent1", filesize=MB_100, extent_name="extent1") as extent1_config:
-                    with file_extent(pool_name, dataset_name, "target.extent2", filesize=MB_256, extent_name="extent2") as extent2_config:
-                        with target_extent_associate(target_id, extent1_config['id'], 100):
-                            with target_extent_associate(target_id, extent2_config['id'], 101):
-                                # libiscsi sends a TUR to the lun on connect, so cannot properly test using it.
-                                # Let's actually login and check that the expected LUNs surface.
-                                assert target_login_test(get_ip_addr(truenas_server.ip), iqn, {100, 101})
-
-                                # With libiscsi we can also check that the expected LUNs are there
-                                with iscsi_scsi_connection(truenas_server.ip, iqn, 100) as s:
-                                    verify_luns(s, [100, 101])
-
-
-def test_34_zvol_extent_volthreading():
-    """
-    Ensure that volthreading is on for a regular zvol and off while it is being
-    used as an iSCSI extent.
-    """
-    zvol_name = f"zvol_volthreading_test{digit}"
-    zvol = f'{pool_name}/{zvol_name}'
-    with zvol_dataset(zvol, MB_100, True, True):
-        assert get_volthreading(zvol) == 'on'
-        with zvol_extent(zvol, extent_name='zvolextent1'):
-            assert get_volthreading(zvol) == 'off'
-        assert get_volthreading(zvol) == 'on'
-
-
-@pytest.mark.parametrize('extent_type', ["FILE", "VOLUME"])
-def test_35_delete_extent_no_dataset(extent_type):
-    """
-    Verify that even if a dataset containing an extent has been deleted from
-    the command line, we can still use the webui/API to delete the extent.
-    """
-    dataset_name = f'iscsids_{extent_type}_{digit}'
-    with dataset(dataset_name) as dspath:
-        DESTROY_CMD = f'zfs destroy -r {dspath}'
-        match extent_type:
-            case 'FILE':
-                with file_extent(pool_name, dataset_name, 'testfile', extent_name='fileextent1'):
-                    ssh(DESTROY_CMD)
-            case 'VOLUME':
-                zvol = f'{dspath}/zvol{digit}'
-                with zvol_dataset(zvol, MB_100, True, True):
-                    with zvol_extent(zvol, extent_name='zvolextent1'):
-                        ssh(DESTROY_CMD)
-
-
-def test_99_teardown(request):
-    # Disable iSCSI service
-    depends(request, ["iscsi_cmd_00"])
-    payload = {'enable': False}
-    call('service.update', 'iscsitarget', payload)
-    # Stop iSCSI service.
-    call('service.stop', 'iscsitarget')
-    sleep(1)
-    # Verify stopped
-    service = _get_service()
-    assert service['state'] == "STOPPED", service
diff --git a/tests/api2/test_262_iscsi_alua.py b/tests/api2/test_262_iscsi_alua.py
deleted file mode 100644
index 564d8e6dcc2be..0000000000000
--- a/tests/api2/test_262_iscsi_alua.py
+++ /dev/null
@@ -1,575 +0,0 @@
-import contextlib
-import random
-import string
-from time import sleep
-
-import pytest
-from assets.websocket.iscsi import (alua_enabled, initiator_portal, target, target_extent_associate, verify_capacity,
-                                    verify_ha_inquiry, verify_luns, zvol_extent)
-from assets.websocket.service import ensure_service_enabled
-from auto_config import ha, pool_name
-from protocols import iscsi_scsi_connection
-
-from middlewared.test.integration.assets.hostkvm import get_kvm_domain, poweroff_vm, reset_vm, start_vm
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import truenas_server
-
-pytestmark = pytest.mark.skipif(not ha, reason='Tests applicable to HA only')
-
-SERVICE_NAME = 'iscsitarget'
-MB = 1024 * 1024
-basename = 'iqn.2005-10.org.freenas.ctl'
-
-
-def other_domain(hadomain):
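-    """Return the KVM domain name of the peer HA controller, e.g. 'foo_c1' <-> 'foo_c2'."""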
-    if hadomain.endswith('_c1'):
-        return f'{hadomain[:-1]}2'
-    elif hadomain.endswith('_c2'):
-        return f'{hadomain[:-1]}1'
-    raise ValueError(f'Invalid HA domain name: {hadomain}')
-
-
-@contextlib.contextmanager
-def zvol(name, volsizeMB):
-    payload = {
-        'name': f'{pool_name}/{name}',
-        'type': 'VOLUME',
-        'volsize': volsizeMB * MB,
-        'volblocksize': '16K'
-    }
-    config = call('pool.dataset.create', payload)
-    try:
-        yield config
-    finally:
-        call('pool.dataset.delete', config['id'])
-
-
-class TestFixtureConfiguredALUA:
-    """Fixture for with iSCSI enabled and ALUA configured"""
-
-    ZEROS = bytearray(512)
-    BLOCKS = 5
-    VERBOSE = False
-    NUM_TARGETS = 10
-
-    def wait_for_settle(self):
-        if self.VERBOSE:
-            print('Checking ALUA status...')
-        retries = 12
-        while retries:
-            if call('iscsi.alua.settled'):
-                if self.VERBOSE:
-                    print('ALUA is settled')
-                break
-            retries -= 1
-            if self.VERBOSE:
-                print('Waiting for ALUA to settle')
-            sleep(5)
-
-    def wait_for_master(self, timeout=120):
-        for _ in range(timeout):
-            try:
-                if call('failover.status') == 'MASTER':
-                    if self.VERBOSE:
-                        print('Can communicate with new MASTER')
-                    break
-                if self.VERBOSE:
-                    print('Waiting for new MASTER')
-                sleep(1)
-            except Exception:
-                if self.VERBOSE:
-                    print('Exception while waiting for new MASTER')
-                sleep(1)
-
-    def wait_for_ready(self, timeout=120):
-        for _ in range(timeout):
-            try:
-                if call('system.ready'):
-                    if self.VERBOSE:
-                        print('System is ready')
-                    break
-                if self.VERBOSE:
-                    print('Waiting for ready')
-                sleep(1)
-            except Exception:
-                if self.VERBOSE:
-                    print('Exception while waiting for ready')
-                sleep(1)
-
-    def wait_for_backup(self, timeout=120):
-        for _ in range(timeout):
-            try:
-                if not call('failover.disabled.reasons'):
-                    if self.VERBOSE:
-                        print('Both controllers available')
-                    break
-                if self.VERBOSE:
-                    print('Waiting for BACKUP')
-                sleep(1)
-            except Exception:
-                if self.VERBOSE:
-                    print('Exception while waiting for BACKUP')
-                sleep(1)
-
-    def wait_for_new_master(self, oldnode, timeout=60):
-        for _ in range(timeout):
-            try:
-                newnode = call('failover.node')
-                if oldnode != newnode:
-                    if call('failover.status') == 'MASTER':
-                        if self.VERBOSE:
-                            print('Can communicate with new MASTER', newnode)
-                        return newnode
-                if self.VERBOSE:
-                    print('Waiting for new MASTER')
-                sleep(1)
-            except Exception:
-                if self.VERBOSE:
-                    print('Exception while waiting for new MASTER')
-                sleep(1)
-
-    def wait_for_failover_in_progress(self, timeout=120):
-        for _ in range(timeout):
-            try:
-                if not call('failover.in_progress'):
-                    if self.VERBOSE:
-                        print('Failover event complete')
-                    return
-                if self.VERBOSE:
-                    print('Waiting for failover event to complete')
-                sleep(1)
-            except Exception:
-                if self.VERBOSE:
-                    print('Exception while waiting for failover event to complete')
-                sleep(1)
-
-    @pytest.fixture(scope='class')
-    def alua_configured(self):
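-        """Class-scoped fixture: ensure the iSCSI service is enabled and started, turn on
-        ALUA, wait for it to settle, then yield an initiator/portal config."""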
-        with ensure_service_enabled(SERVICE_NAME):
-            call('service.start', SERVICE_NAME)
-            with alua_enabled():
-                self.wait_for_settle()
-                with initiator_portal() as config:
-                    yield config
-            if self.VERBOSE:
-                print('Tore down ALUA')
-        if self.VERBOSE:
-            print('Tore down iSCSI')
-
-    @pytest.fixture(scope='class')
-    def fix_complex_alua_config(self, alua_configured):
-        """Fixture to create a non-trival ALUA iSCSI configuration"""
-        # Will create 10 targets (0-9), each with 0 to 5 LUNs (see lun_count)
-        config = alua_configured
-        portal_id = config['portal']['id']
-        digits = ''.join(random.choices(string.digits, k=4))
-        # iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
-        targets = {}
-        with contextlib.ExitStack() as es:
-            for i in range(self.NUM_TARGETS):
-                namebase = f'{digits}x{i}'
-                if self.VERBOSE:
-                    print(f'Creating target {i}...')
-                target_config = es.enter_context(target(f'target{namebase}', [{'portal': portal_id}]))
-                target_id = target_config['id']
-                target_config['luns'] = {}
-                luncount = self.lun_count(i)
-                for j in range(luncount):
-                    sizemb = 20 + (10 * (j + 1))
-                    if i > 7:
-                        lun = 100 + j
-                    else:
-                        lun = j
-                    if self.VERBOSE:
-                        print(f'Creating extent (LUN {lun} {sizemb}MB)...')
-                    target_config['luns'][lun] = es.enter_context(
-                        self.target_lun(target_id, f'extent{namebase}l{lun}', sizemb, lun)
-                    )
-                targets[i] = target_config
-            sleep(2)
-            self.wait_for_settle()
-            yield targets
-            if self.VERBOSE:
-                print(f'Tearing down {self.NUM_TARGETS} targets ...')
-        if self.VERBOSE:
-            print(f'Tore down {self.NUM_TARGETS} targets')
-
-    @contextlib.contextmanager
-    def target_lun(self, target_id, zvol_name, mb, lun):
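-        """Create a zvol of `mb` MiB, export it as an extent and associate it with the
-        target at the given LUN; yields the zvol, extent and association configs."""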
-        with zvol(zvol_name, mb) as zvol_config:
-            with zvol_extent(zvol_config['id'], zvol_name) as extent_config:
-                with target_extent_associate(target_id, extent_config['id'], lun) as associate_config:
-                    yield {
-                        'zvol': zvol_config,
-                        'extent': extent_config,
-                        'associate': associate_config
-                    }
-
-    def verify_luns(self, iqn, lun_size_list):
-        """Ensure that the expected LUNs are visible from each controller."""
-        lun_list = [lun for lun, _ in lun_size_list]
-        for lun, mb in lun_size_list:
-            # Node A
-            with iscsi_scsi_connection(truenas_server.nodea_ip, iqn, lun) as s:
-                verify_luns(s, lun_list)
-                verify_capacity(s, mb * MB)
-            # Node B
-            with iscsi_scsi_connection(truenas_server.nodeb_ip, iqn, lun) as s:
-                verify_luns(s, lun_list)
-                verify_capacity(s, mb * MB)
-
-    def lun_count(self, targetnum):
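-        """Number of LUNs to create for the given target index: 0 none, 1-5 one, 6-8 two, otherwise five."""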
-        match targetnum:
-            case 0:
-                return 0
-            case 1 | 2 | 3 | 4 | 5:
-                return 1
-            case 6 | 7 | 8:
-                return 2
-            case _:
-                return 5
-
-    def test_alua_luns(self, alua_configured):
-        """Test whether an ALUA target reacts correctly to having a LUN added
-        and removed again (in terms of REPORT LUNS response)"""
-        config = alua_configured
-        portal_id = config['portal']['id']
-        digits = ''.join(random.choices(string.digits, k=4))
-        target_name = f'target{digits}'
-        iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
-        with target(target_name, [{'portal': portal_id}]) as target_config:
-            target_id = target_config['id']
-            # First configure a single extent at LUN 0 and ensure that we
-            # can see it from both interfaces.
-            with self.target_lun(target_id, f'extent0_{digits}', 100, 0):
-                sleep(2)
-                self.wait_for_settle()
-                self.verify_luns(iqn, [(0, 100)])
-
-                # Next add a 2nd extent at LUN 1 and ensure that we can see both LUNs
-                # from both interfaces.
-                with self.target_lun(target_id, f'extent1_{digits}', 200, 1):
-                    sleep(2)
-                    self.wait_for_settle()
-                    self.verify_luns(iqn, [(0, 100), (1, 200)])
-
-                # After the LUN 1 extent has been removed again, ensure that we cannot see it
-                # any longer.
-                sleep(2)
-                self.wait_for_settle()
-                self.verify_luns(iqn, [(0, 100)])
-
-                # Next add back a 2nd extent at LUN 1 (with a different size) and ensure
-                # that we can still see both LUNs from both interfaces.
-                with self.target_lun(target_id, f'extent1_{digits}', 250, 1):
-                    sleep(2)
-                    self.wait_for_settle()
-                    self.verify_luns(iqn, [(0, 100), (1, 250)])
-                    # Add a third LUN
-                    with self.target_lun(target_id, f'extent2_{digits}', 300, 2):
-                        sleep(2)
-                        self.wait_for_settle()
-                        self.verify_luns(iqn, [(0, 100), (1, 250), (2, 300)])
-                    sleep(2)
-                    self.wait_for_settle()
-                    self.verify_luns(iqn, [(0, 100), (1, 250)])
-                sleep(2)
-                self.wait_for_settle()
-                self.verify_luns(iqn, [(0, 100)])
-
-    def test_alua_lun_100(self, alua_configured):
-        """Test that an ALUA target - without a LUN 0 - works correctly with only LUN 100."""
-        config = alua_configured
-        portal_id = config['portal']['id']
-        digits = ''.join(random.choices(string.digits, k=4))
-        target_name = f'target{digits}'
-        iqn = f'iqn.2005-10.org.freenas.ctl:{target_name}'
-        with target(target_name, [{'portal': portal_id}]) as target_config:
-            target_id = target_config['id']
-            # Configure a single extent at LUN 100 and ensure that we
-            # can see it from both interfaces.
-            with self.target_lun(target_id, f'extent0_{digits}', 200, 100):
-                sleep(2)
-                self.wait_for_settle()
-                self.verify_luns(iqn, [(100, 200)])
-            sleep(2)
-            self.wait_for_settle()
-
-    def visit_luns(self, ip, config, callback):
-        """Run the specified callback method for each LUN in the config"""
-        for target_num, target_config in config.items():
-            luns = target_config['luns']
-            if not luns:
-                # If no LUNs then we can't talk to the target.
-                continue
-            target_name = target_config['name']
-            iqn = f'{basename}:{target_name}'
-            for lun, lun_config in luns.items():
-                with iscsi_scsi_connection(ip, iqn, lun) as s:
-                    callback(s, target_num, lun, lun_config)
-
-    def validate_shape(self, ip, config, tpgs=1):
-        """Validate that each LUN in the config has the expected shape.
-
-        For example, serial number, NAA, size.
-        """
-        def validate_lun(s, target_num, lun, lun_config):
-            api_serial_number = lun_config['extent']['serial']
-            api_naa = lun_config['extent']['naa']
-            verify_ha_inquiry(s, api_serial_number, api_naa, tpgs)
-            if 'zvol' in lun_config:
-                verify_capacity(s, lun_config['zvol']['volsize']['parsed'])
-            if self.VERBOSE:
-                print(f'Target {target_num} LUN {lun} shape OK')
-        self.visit_luns(ip, config, validate_lun)
-
-    @pytest.fixture(scope='class')
-    def fix_validate_shapes(self, fix_complex_alua_config):
-        """Fixture that validates that the complex ALUA config has the right shape."""
-        # Make sure that each controller is exporting the targets/LUNs we expect
-        if self.VERBOSE:
-            print('Validate shape seen by Node A...')
-        self.validate_shape(truenas_server.nodea_ip, fix_complex_alua_config)
-
-        if self.VERBOSE:
-            print('Validate shape seen by Node B...')
-        self.validate_shape(truenas_server.nodeb_ip, fix_complex_alua_config)
-
-        if self.VERBOSE:
-            print('Validated shape')
-        yield fix_complex_alua_config
-
-    def zero_luns(self, ip, config):
-        def zero_lun(s, target_num, lun, lun_config):
-            # Write zeros using WRITE SAME (16)
-            s.writesame16(0, self.BLOCKS, self.ZEROS)
-        self.visit_luns(ip, config, zero_lun)
-
-    def check_zero_luns(self, ip, config):
-        def check_zero_lun(s, target_num, lun, lun_config):
-            r = s.read16(0, self.BLOCKS)
-            assert r.datain == self.ZEROS * self.BLOCKS, r.datain
-        self.visit_luns(ip, config, check_zero_lun)
-
-    @pytest.fixture(scope='class')
-    def fix_zero_luns(self, fix_validate_shapes):
-        """Fixture that validates that the complex ALUA config has zeros written to LUNs."""
-        # Zero the LUNs
-        self.zero_luns(truenas_server.nodea_ip, fix_validate_shapes)
-
-        # Check that the LUNs are zeroed
-        self.check_zero_luns(truenas_server.nodea_ip, fix_validate_shapes)
-        self.check_zero_luns(truenas_server.nodeb_ip, fix_validate_shapes)
-
-        if self.VERBOSE:
-            print('LUNs zeroed')
-        return fix_validate_shapes
-
-    def page_pattern(self, target_num, lun):
-        """
-        Return a 512 byte long bytearray unique to the target/lun.
-        """
-        basis = f'TARGET {target_num} LUN {lun} ------'
-        b = bytearray()
-        b.extend(basis[:16].encode())
-        pattern = b * 32
-        assert len(pattern) == 512, pattern
-        return pattern
-
-    def write_patterns(self, ip, config):
-        def write_pattern(s, target_num, lun, lun_config):
-            s.writesame16(1, 2, self.page_pattern(target_num, lun))
-        self.visit_luns(ip, config, write_pattern)
-
-    def check_patterns(self, ip, config):
-        def check_pattern(s, target_num, lun, lun_config):
-            pattern = self.page_pattern(target_num, lun)
-            r = s.read16(0, 1)
-            assert r.datain == self.ZEROS, r.datain
-            r = s.read16(1, 2)
-            assert r.datain == pattern * 2, r.datain
-            r = s.read16(3, 1)
-            assert r.datain == self.ZEROS, r.datain
-            if self.VERBOSE:
-                print(f'Target {target_num} LUN {lun} pattern OK:', pattern[:16])
-        self.visit_luns(ip, config, check_pattern)
-
-    @pytest.fixture(scope='class')
-    def fix_write_patterns(self, fix_zero_luns):
-        """Fixture that writes a data pattern to the complex ALUA config."""
-        # Write the pattern
-        self.write_patterns(truenas_server.nodea_ip, fix_zero_luns)
-        if self.VERBOSE:
-            print('Wrote LUN patterns')
-
-        # Check that the LUNs have the correct patterns
-        if self.VERBOSE:
-            print('Validate data pattern seen by Node A...')
-        self.check_patterns(truenas_server.nodea_ip, fix_zero_luns)
-        if self.VERBOSE:
-            print('Validate data pattern seen by Node B...')
-        self.check_patterns(truenas_server.nodeb_ip, fix_zero_luns)
-
-        if self.VERBOSE:
-            print('LUNs have pattern written / checked')
-        return fix_zero_luns
-
-    @pytest.fixture(scope='class')
-    def fix_orig_active_node(self):
-        return call('failover.node')
-
-    @pytest.mark.timeout(900)
-    def test_complex_alua_setup(self, fix_validate_shapes, fix_orig_active_node):
-        """
-        Test that the complex ALUA configuration is setup, and has the correct shape.
-        """
-        orig_active_node = fix_orig_active_node
-        assert orig_active_node in ['A', 'B']
-
-    @pytest.mark.timeout(900)
-    def test_complex_zero_luns(self, fix_zero_luns):
-        """
-        Test that the complex ALUA configuration is setup, and has zeros written
-        to LUNs.
-        """
-        pass
-
-    @pytest.mark.timeout(900)
-    def test_complex_write_patterns(self, fix_write_patterns):
-        """
-        Test that the complex ALUA configuration is setup, and has a data pattern written
-        to LUNs.
-        """
-        pass
-
-    @pytest.fixture
-    def fix_get_domain(self):
-        """
-        Fixture to get the KVM domain associated with the current
-        MASTER node.
-
-        Note: unlike most other fixtures in this class, the fixture does NOT
-        have class scope.
-        """
-        # Do some sanity checks before we proceed.
-        assert call('failover.status') == 'MASTER'
-
-        node = call('failover.node')
-        assert node in ['A', 'B']
-
-        domain = get_kvm_domain()
-        assert domain
-        if node == 'A':
-            assert domain.endswith('_c1')
-        elif node == 'B':
-            assert domain.endswith('_c2')
-
-        return {'node': node, 'domain': domain}
-
-    @pytest.mark.timeout(900)
-    def test_failover_complex_alua_config(self, fix_write_patterns, fix_get_domain):
-        """
-        Power off the current MASTER and ensure that the previous BACKUP node serves
-        the ALUA targets, as soon as failover is complete.
-        """
-        node = fix_get_domain['node']
-        domain = fix_get_domain['domain']
-
-        # Shutdown the current MASTER.
-        if self.VERBOSE:
-            print('Powering off VM', domain)
-        poweroff_vm(domain)
-
-        # Wait for the new MASTER to come up
-        newnode = self.wait_for_new_master(node)
-
-        # Wait for the failover event to complete
-        self.wait_for_failover_in_progress()
-
-        if newnode == 'A':
-            new_ip = truenas_server.nodea_ip
-        else:
-            new_ip = truenas_server.nodeb_ip
-
-        if self.VERBOSE:
-            print(f'Validate shape seen by Node {newnode}...')
-        self.validate_shape(new_ip, fix_write_patterns, 0)
-        if self.VERBOSE:
-            print(f'Validate data pattern seen by Node {newnode}...')
-        self.check_patterns(new_ip, fix_write_patterns)
-
-    @pytest.mark.timeout(900)
-    def test_boot_complex_alua_config(self, fix_write_patterns, fix_get_domain, fix_orig_active_node):
-        """
-        Reset the current MASTER, power the previous MASTER back on, and ensure
-        that ALUA targets are served by both nodes.
-        """
-        domain = fix_get_domain['domain']
-        orig_domain = other_domain(domain)
-
-        # Reset the MASTER
-        reset_vm(domain)
-        if self.VERBOSE:
-            print('Reset VM', domain)
-
-        # Power the shutdown node back on.
-        start_vm(orig_domain)
-        if self.VERBOSE:
-            print('Started VM', orig_domain)
-
-        sleep(5)
-
-        # Wait for the new MASTER to come up
-        self.wait_for_master()
-        self.wait_for_failover_in_progress()
-        self.wait_for_ready()
-        assert call('system.info')['uptime_seconds'] < 600
-
-        # Ensure that the BACKUP is also up
-        self.wait_for_backup()
-        self.wait_for_settle()
-        assert call('failover.call_remote', 'system.info')['uptime_seconds'] < 600
-
-        newnode = call('failover.node')
-        assert newnode in ['A', 'B']
-
-        if newnode == 'A':
-            new_ip = truenas_server.nodea_ip
-            other_ip = truenas_server.nodeb_ip
-            othernode = 'B'
-        else:
-            new_ip = truenas_server.nodeb_ip
-            other_ip = truenas_server.nodea_ip
-            othernode = 'A'
-
-        # Ensure that the targets look OK on MASTER
-        if self.VERBOSE:
-            print(f'Validate shape seen by Node {newnode}...')
-        self.validate_shape(new_ip, fix_write_patterns, None)
-
-        if self.VERBOSE:
-            print(f'Validate data pattern seen by Node {newnode}...')
-        self.check_patterns(new_ip, fix_write_patterns)
-
-        # Ensure that the targets look OK on BACKUP
-        if self.VERBOSE:
-            print(f'Validate shape seen by Node {othernode}...')
-        self.validate_shape(other_ip, fix_write_patterns, 1)
-
-        if self.VERBOSE:
-            print(f'Validate data pattern seen by Node {othernode}...')
-        self.check_patterns(other_ip, fix_write_patterns)
-
-        # Finally, we want to ensure that we have the same MASTER node as
-        # when these tests started.
-        if newnode != fix_orig_active_node:
-            if self.VERBOSE:
-                print(f'Restoring {fix_orig_active_node} as MASTER')
-            call('system.reboot', 'iSCSI ALUA test')
-            newnode2 = self.wait_for_new_master(newnode)
-            assert newnode2 == fix_orig_active_node
-            self.wait_for_backup()
-            self.wait_for_settle()
diff --git a/tests/api2/test_275_ldap.py b/tests/api2/test_275_ldap.py
deleted file mode 100644
index 16ca75add2468..0000000000000
--- a/tests/api2/test_275_ldap.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.directory_service import ldap, LDAPUSER, LDAPPASSWORD
-from middlewared.test.integration.assets.privilege import privilege
-from middlewared.test.integration.assets.product import product_type
-from middlewared.test.integration.utils import call, client
-
-pytestmark = [
-    pytest.mark.skipif(not LDAPUSER, reason='Missing LDAP configuration'),
-]
-
-
-@pytest.fixture(scope="module")
-def do_ldap_connection(request):
-    with ldap() as ldap_conn:
-        with product_type():
-            yield ldap_conn
-
-
-def test_ldap_initial_state():
-    ds = call("directoryservices.status")
-    assert ds["type"] is None
-    assert ds["status"] is None
-
-    ldap_config = call("ldap.config")
-    assert not ldap_config["enable"]
-
-
-def test_ldap_schema_choices():
-    expected = {"RFC2307", "RFC2307BIS"}
-
-    choices = call("ldap.schema_choices")
-    assert set(choices) == expected
-
-
-def test_get_ldap_ssl_choices():
-    expected = {"OFF", "ON", "START_TLS"}
-
-    choices = call("ldap.ssl_choices")
-    assert set(choices) == expected
-
-
-def test_ldap_connection(do_ldap_connection):
-    ds = call("directoryservices.status")
-    assert ds["type"] == "LDAP"
-    assert ds["status"] == "HEALTHY"
-
-    ldap_config = call("ldap.config")
-    assert ldap_config["enable"]
-    assert ldap_config["server_type"] == "OPENLDAP"
-
-
-def test_ldap_user_group_cache(do_ldap_connection):
-    assert call("user.query", [["local", "=", False]], {'count': True}) != 0
-    assert call("group.query", [["local", "=", False]], {'count': True}) != 0
-
-
-def test_account_privilege_authentication(do_ldap_connection):
-
-    call("system.general.update", {"ds_auth": True})
-    try:
-        group = call("user.get_user_obj", {"username": LDAPUSER})
-        assert group["source"] == "LDAP"
-        with privilege({
-            "name": "LDAP privilege",
-            "local_groups": [],
-            "ds_groups": [group["pw_gid"]],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        }):
-            with client(auth=(LDAPUSER, LDAPPASSWORD)) as c:
-                methods = c.call("core.get_methods")
-
-            assert "system.info" in methods
-            assert "pool.create" not in methods
-    finally:
-        call("system.general.update", {"ds_auth": False})
diff --git a/tests/api2/test_278_freeipa.py b/tests/api2/test_278_freeipa.py
deleted file mode 100644
index cce72757e5f62..0000000000000
--- a/tests/api2/test_278_freeipa.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/usr/bin/env python3
-
-import pytest
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import SSH_TEST
-from middlewared.test.integration.assets.directory_service import ldap
-from middlewared.test.integration.utils import call
-from auto_config import ha, user, password
-
-try:
-    from config import (
-        FREEIPA_IP,
-        FREEIPA_BASEDN,
-        FREEIPA_BINDDN,
-        FREEIPA_BINDPW,
-        FREEIPA_HOSTNAME,
-    )
-except ImportError:
-    Reason = 'FREEIPA* variables are not set up in config.py'
-    pytestmark = pytest.mark.skipif(True, reason=Reason)
-
-
-@pytest.fixture(scope="module")
-def do_freeipa_connection():
-    # Confirm DNS forward
-    res = SSH_TEST(f"host {FREEIPA_HOSTNAME}", user, password)
-    assert res['result'] is True, res
-    # stdout: "<FREEIPA_HOSTNAME> has address <FREEIPA_IP>"
-    assert res['stdout'].split()[-1] == FREEIPA_IP
-
-    # DNS reverse
-    res = SSH_TEST(f"host {FREEIPA_IP}", user, password)
-    assert res['result'] is True, res
-    # stdout: <FREEIPA_IP_reverse_format>.in-addr.arpa domain name pointer <FREEIPA_HOSTNAME>.
-    assert res['stdout'].split()[-1] == FREEIPA_HOSTNAME + "."
-
-    with ldap(
-        FREEIPA_BASEDN,
-        FREEIPA_BINDDN,
-        FREEIPA_BINDPW,
-        FREEIPA_HOSTNAME,
-        validate_certificates=False,
-    ) as ldap_conn:
-        yield ldap_conn
-
-
-    # Validate that our LDAP configuration alert goes away when it's disabled.
-    alerts = [alert['klass'] for alert in call('alert.list')]
-
-    # There's a one-shot alert that gets fired if we are an IPA domain
-    # connected via legacy mechanism.
-    assert 'IPALegacyConfiguration' not in alerts
-
-
-def test_setup_and_enabling_freeipa(do_freeipa_connection):
-    # We are intentionally using an expired password in order to force
-    # a legacy-style LDAP bind. We need this support to not break
-    # existing FreeIPA users on update. This should be reworked in FT.
-
-    ds = call('directoryservices.status')
-    assert ds['type'] == 'LDAP'
-    assert ds['status'] == 'HEALTHY'
-
-    alerts = [alert['klass'] for alert in call('alert.list')]
-
-    # There's a one-shot alert that gets fired if we are an IPA domain
-    # connected via legacy mechanism.
-    assert 'IPALegacyConfiguration' in alerts
-
-
-def test_verify_config(request):
-    ldap_config = call('ldap.config')
-    assert 'RFC2307BIS' == ldap_config['schema']
-    assert ldap_config['search_bases']['base_user'] == 'cn=users,cn=accounts,dc=tn,dc=ixsystems,dc=net'
-    assert ldap_config['search_bases']['base_group'] == 'cn=groups,cn=accounts,dc=tn,dc=ixsystems,dc=net'
-    assert ldap_config['search_bases']['base_netgroup'] == 'cn=ng,cn=compat,dc=tn,dc=ixsystems,dc=net'
-    assert ldap_config['server_type'] == 'FREEIPA'
-
-
-def test_verify_that_the_freeipa_user_id_exist_on_the_nas(do_freeipa_connection):
-    """
-    get_user_obj is a wrapper around the pwd module.
-    """
-    pwd_obj = call('user.get_user_obj', {'username': 'ixauto_restricted', 'get_groups': True})
-
-    assert pwd_obj['pw_uid'] == 925000003
-    assert pwd_obj['pw_gid'] == 925000003
-    assert len(pwd_obj['grouplist']) >= 1, pwd_obj['grouplist']
-
-
-def test_10_verify_support_for_netgroups(do_freeipa_connection):
-    """
-    'getent netgroup' should be able to retrieve netgroup
-    """
-    res = SSH_TEST("getent netgroup ixtestusers", user, password)
-    assert res['result'] is True, f"Failed to find netgroup 'ixgroup', returncode={res['returncode']}"
-
-    # Confirm expected set of users or hosts
-    ixgroup = res['stdout'].split()[1:]
-
-    # Confirm number of entries and some elements
-    assert len(ixgroup) == 3, ixgroup
-    assert any("testuser1" in sub for sub in ixgroup), ixgroup
diff --git a/tests/api2/test_300_nfs.py b/tests/api2/test_300_nfs.py
deleted file mode 100644
index 82a0c25cd6a50..0000000000000
--- a/tests/api2/test_300_nfs.py
+++ /dev/null
@@ -1,1872 +0,0 @@
-import contextlib
-import ipaddress
-import os
-import re
-from copy import copy
-from time import sleep
-
-import pytest
-
-from middlewared.service_exception import (
-    ValidationError, ValidationErrors, CallError, InstanceNotFound
-)
-from middlewared.test.integration.assets.account import group as create_group
-from middlewared.test.integration.assets.account import user as create_user
-from middlewared.test.integration.assets.filesystem import directory
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, mock, ssh
-from middlewared.test.integration.utils.string import random_string
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.failover import wait_for_standby
-from middlewared.test.integration.utils.system import reset_systemd_svcs as reset_svcs
-
-from auto_config import hostname, password, pool_name, user, ha
-from protocols import SSH_NFS, nfs_share
-
-MOUNTPOINT = f"/tmp/nfs-{hostname}"
-dataset = f"{pool_name}/nfs"
-dataset_url = dataset.replace('/', '%2F')
-NFS_PATH = "/mnt/" + dataset
-
-# Alias
-pp = pytest.param
-
-# Supported configuration files
-conf_file = {
-    "nfs": {
-        "pname": "/etc/nfs.conf.d/local.conf",
-        "sections": {
-            'nfsd': {},
-            'exportd': {},
-            'nfsdcld': {},
-            'nfsdcltrack': {},
-            'mountd': {},
-            'statd': {},
-            'lockd': {}}
-    },
-    "idmapd": {
-        "pname": "/etc/idmapd.conf",
-        "sections": {"General": {}, "Mapping": {}, "Translation": {}}
-    }
-}
-
-
-# =====================================================================
-#                     Fixtures and utilities
-# =====================================================================
-
-class NFS_CONFIG:
-    '''This is used to restore the NFS config to its original state'''
-    initial_nfs_config = {}
-
-    # These are the expected default config values
-    default_config = {
-        "allow_nonroot": False,
-        "protocols": ["NFSV3", "NFSV4"],
-        "v4_krb": False,
-        "v4_domain": "",
-        "bindip": [],
-        "mountd_port": None,
-        "rpcstatd_port": None,
-        "rpclockd_port": None,
-        "mountd_log": False,  # nfs.py indicates this should be True, but db says False
-        "statd_lockd_log": False,
-        "v4_krb_enabled": False,
-        "userd_manage_gids": False,
-        "keytab_has_nfs_spn": False,
-        "managed_nfsd": True,
-        "rdma": False,
-    }
-
-    initial_service_state = {}
-
-    # These are the expected default run state values
-    default_service_state = {
-        "service": "nfs",
-        "enable": False,
-        "state": "STOPPED",
-        "pids": []
-    }
-
-
-def parse_exports():
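-    '''
-    Parse /etc/exports into a list of {'path': ..., 'opts': [{'host': ..., 'parameters': [...]}]}.
-
-    The expected layout is a quoted path line ending in a backslash, followed by
-    tab-indented 'host(param,param,...)' entries for that path.
-    '''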
-    exp = ssh("cat /etc/exports").splitlines()
-    rv = []
-    for idx, line in enumerate(exp):
-        if not line or line.startswith('\t'):
-            continue
-
-        entry = {"path": line.strip()[1:-2], "opts": []}
-
-        i = idx + 1
-        while i < len(exp):
-            if not exp[i].startswith('\t'):
-                break
-
-            e = exp[i].strip()
-            host, params = e.split('(', 1)
-            entry['opts'].append({
-                "host": host,
-                "parameters": params[:-1].split(",")
-            })
-            i += 1
-
-        rv.append(entry)
-
-    return rv
-
-
-def parse_server_config(conf_type="nfs"):
-    '''
-    Parse known 'ini' style conf files.  See definition of conf_file above.
-
-    Debian reads /etc/default/nfs-common and then /etc/nfs.conf.
-    All TrueNAS NFS settings are in /etc/nfs.conf.d/local.conf as overrides
-    '''
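-    # Returns conf_file[conf_type]['sections'] populated with the "key = value" pairs found under each [section].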
-    assert conf_type in conf_file.keys(), f"{conf_type} is not a supported conf type"
-    pathname = conf_file[conf_type]['pname']
-    rv = conf_file[conf_type]['sections']
-    expected_sections = rv.keys()
-
-    # Read the file and parse it
-    res = ssh(f"cat {pathname}")
-    conf = res.splitlines()
-    section = ''
-
-    for line in conf:
-        if not line or line.startswith("#"):
-            continue
-        if line.startswith("["):
-            section = line.split('[')[1].split(']')[0]
-            assert section in expected_sections, f"Unexpected section found: {section}"
-            continue
-
-        k, v = line.split(" = ", 1)
-        rv[section].update({k: v})
-
-    return rv
-
-
-def parse_rpcbind_config():
-    '''
-    In Debian 12 (Bookworm) rpcbind uses /etc/default/rpcbind.
-    Look for /etc/rpcbind.conf in future releases.
-    '''
-    conf = ssh("cat /etc/default/rpcbind").splitlines()
-    rv = {}
-
-    # With bindip the line of interest looks like: OPTIONS=-w -h 192.168.40.156
-    for line in conf:
-        if not line or line.startswith("#"):
-            continue
-        if line.startswith("OPTIONS"):
-            opts = line.split('=')[1].split()
-            # '-w' is hard-wired, lets confirm that
-            assert len(opts) > 0
-            assert '-w' == opts[0]
-            rv['-w'] = ''
-            # If there are more opts they must be the bindip settings
-            if len(opts) == 3:
-                rv[opts[1]] = opts[2]
-
-    return rv
-
-
-def get_nfs_service_state():
-    nfs_service = call('service.query', [['service', '=', 'nfs']], {'get': True})
-    return nfs_service['state']
-
-
-def set_nfs_service_state(do_what=None, expect_to_pass=True, fail_check=False):
-    """
-    Start or Stop NFS service
-    expect_to_pass parameter is optional
-    fail_check parameter is optional
-    """
-    assert do_what in ['start', 'stop'], f"Requested invalid service state: {do_what}"
-    test_res = {'start': True, 'stop': False}
-
-    if expect_to_pass:
-        call(f'service.{do_what}', 'nfs', {'silent': False})
-        sleep(1)
-    else:
-        with pytest.raises(CallError) as e:
-            call(f'service.{do_what}', 'nfs', {'silent': False})
-        if fail_check:
-            assert fail_check in str(e.value)
-
-    # Confirm requested state
-    if expect_to_pass:
-        res = call('service.started', 'nfs')
-        assert res == test_res[do_what], f"Expected {test_res[do_what]} for NFS started result, but found {res}"
-        return res
-
-
-def get_client_nfs_port():
-    '''
-    Output from netstat -nt looks like:
-        tcp        0      0 127.0.0.1:50664         127.0.0.1:6000          ESTABLISHED
-    The client port is the number after the ':' in the 5th column
-    '''
-    rv = (None, None)
-    res = ssh("netstat -nt")
-    for line in str(res).splitlines():
-        # The server will listen on port 2049
-        if f"{truenas_server.ip}:2049" == line.split()[3]:
-            rv = (line, line.split()[4].split(':')[1])
-    return rv
-
-
-def set_immutable_state(path: str, want_immutable=True):
-    '''
-    Used by exportsd test
-    '''
-    call('filesystem.set_zfs_attributes', {
-        'path': path,
-        'zfs_file_attributes': {'immutable': want_immutable}
-    })
-    is_immutable = 'IMMUTABLE' in call('filesystem.stat', path)['attributes']
-
-    assert is_immutable is want_immutable, f"Expected immutable={want_immutable} for {path}"
-
-
-def confirm_nfsd_processes(expected):
-    '''
-    Confirm the expected number of nfsd processes are running
-    '''
-    result = ssh("cat /proc/fs/nfsd/threads")
-    assert int(result) == expected, result
-
-
-def confirm_mountd_processes(expected):
-    '''
-    Confirm the expected number of mountd processes are running
-    '''
-    rx_mountd = r"rpc\.mountd"
-    result = ssh(f"ps -ef | grep '{rx_mountd}' | wc -l")
-
-    # If there is more than one, we subtract one to account for the rpc.mountd thread manager
-    num_detected = int(result)
-    assert (num_detected - 1 if num_detected > 1 else num_detected) == expected
-
-
-def confirm_rpc_processes(expected=['idmapd', 'bind', 'statd']):
-    '''
-    Confirm the expected rpc processes are running
-    NB: This only supports the listed names
-    '''
-    prepend = {'idmapd': 'rpc.', 'bind': 'rpc', 'statd': 'rpc.'}
-    for n in expected:
-        procname = prepend[n] + n
-        assert len(ssh(f"pgrep {procname}").splitlines()) > 0
-
-
-def confirm_nfs_version(expected=[]):
-    '''
-    Confirm the expected NFS versions are 'enabled and supported'
-    Possible values for expected:
-        ["3"] means NFSv3 only
-        ["4"] means NFSv4 only
-        ["3","4"] means both NFSv3 and NFSv4
-    '''
-    result = ssh("rpcinfo -s | grep ' nfs '").strip().split()[1]
-    for v in expected:
-        assert v in result, result
-
-
-def confirm_rpc_port(rpc_name, port_num):
-    '''
-    Confirm the expected port for the requested rpc process
-    rpc_name = ('mountd', 'status', 'nlockmgr')
-    '''
-    line = ssh(f"rpcinfo -p | grep {rpc_name} | grep tcp")
-    # example:    '100005    3   tcp    618  mountd'
-    assert int(line.split()[3]) == port_num, str(line)
-
-
-def run_missing_usrgrp_mapping_test(data: list[str], usrgrp, tmp_path, share, usrgrpInst):
-    ''' Used by test_invalid_user_group_mapping '''
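-    # data[0] names the sharing.nfs property that is remapped to the built-in 'ftp' account below.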
-    parsed = parse_exports()
-    assert len(parsed) == 2, str(parsed)
-    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
-    assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}"
-
-    # Remove the user/group and restart nfs
-    call(f'{usrgrp}.delete', usrgrpInst['id'])
-    call('service.restart', 'nfs')
-
-    # An alert should be generated
-    alerts = call('alert.list')
-    this_alert = [entry for entry in alerts if entry['klass'] == "NFSexportMappingInvalidNames"]
-    assert len(this_alert) == 1, f"Did not find alert for 'NFSexportMappingInvalidNames'.\n{alerts}"
-
-    # The NFS export should have been removed
-    parsed = parse_exports()
-    assert len(parsed) == 1, str(parsed)
-    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
-    assert len(this_share) == 0, f"Unexpectedly found share {tmp_path}.\nexports = {parsed}"
-
-    # Modify share to map with a built-in user or group and restart NFS
-    call('sharing.nfs.update', share, {data[0]: "ftp"})
-    call('service.restart', 'nfs')
-
-    # The alert should be cleared
-    alerts = call('alert.list')
-    this_alert = [entry for entry in alerts if entry['key'] == "NFSexportMappingInvalidNames"]
-    assert len(this_alert) == 0, f"Unexpectedly found alert 'NFSexportMappingInvalidNames'.\n{alerts}"
-
-    # Share should have been restored
-    parsed = parse_exports()
-    assert len(parsed) == 2, str(parsed)
-    this_share = [entry for entry in parsed if entry['path'] == f'{tmp_path}']
-    assert len(this_share) == 1, f"Did not find share {tmp_path}.\nexports = {parsed}"
-
-
-@contextlib.contextmanager
-def manage_start_nfs():
-    """ The exit state is managed by init_nfs """
-    try:
-        yield set_nfs_service_state('start')
-    finally:
-        set_nfs_service_state('stop')
-
-
-def move_systemdataset(new_pool_name):
-    ''' Move the system dataset to the requested pool '''
-    call('systemdataset.update', {'pool': new_pool_name}, job=True)
-    if ha:
-        wait_for_standby()
-
-    return call('systemdataset.config')
-
-
-@contextlib.contextmanager
-def system_dataset(new_pool_name):
-    '''
-    Temporarily move the system dataset to the new_pool_name
-    '''
-    orig_sysds = call('systemdataset.config')
-    try:
-        sysds = move_systemdataset(new_pool_name)
-        yield sysds
-    finally:
-        move_systemdataset(orig_sysds['pool'])
-
-
-@contextlib.contextmanager
-def nfs_dataset(name, options=None, acl=None, mode=None, pool=None):
-    """
-    NOTE: This is _nearly_ the same as the 'dataset' test asset. The difference
-          is the retry loop.
-    TODO: Enhance the 'dataset' test asset to include a retry loop
-    """
-    assert "/" not in name
-    _pool_name = pool if pool else pool_name
-
-    _dataset = f"{_pool_name}/{name}"
-
-    try:
-        call("pool.dataset.create", {"name": _dataset, **(options or {})})
-
-        if acl is None:
-            call("filesystem.setperm", {'path': f"/mnt/{_dataset}", "mode": mode or "777"}, job=True)
-        else:
-            call("filesystem.setacl", {'path': f"/mnt/{_dataset}", "dacl": acl}, job=True)
-
-        yield _dataset
-
-    finally:
-        # dataset may be busy
-        sleep(2)
-        for _ in range(6):
-            try:
-                call("pool.dataset.delete", _dataset)
-                # Success
-                break
-            except InstanceNotFound:
-                # Also success
-                break
-            except Exception:
-                # Cannot yet delete
-                sleep(10)
-
-
-@contextlib.contextmanager
-def nfs_config():
-    ''' Use this to restore NFS settings '''
-    try:
-        nfs_db_conf = call("nfs.config")
-        excl = ['id', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd']
-        [nfs_db_conf.pop(key) for key in excl]
-        yield copy(nfs_db_conf)
-    finally:
-        call("nfs.update", nfs_db_conf)
-
-
-@contextlib.contextmanager
-def nfs_share_config(nfsid: int):
-    ''' Use this to restore NFS share settings '''
-    try:
-        configs = call("sharing.nfs.query", [["id", "=", nfsid]])
-        assert configs != []
-        share_config = configs[0]
-        yield copy(share_config)
-    finally:
-        excl = ['id', 'path', 'locked']
-        [share_config.pop(key) for key in excl]
-        call("sharing.nfs.update", nfsid, share_config)
-
-
-@pytest.fixture(scope="module")
-def init_nfs():
-    """ Will restore to _default_ config and state at module exit """
-    try:
-        initial_config = call("nfs.config")
-        NFS_CONFIG.initial_nfs_config = copy(initial_config)
-
-        initial_service_state = call('service.query', [['service', '=', 'nfs']], {'get': True})
-        NFS_CONFIG.initial_service_state = copy(initial_service_state)
-
-        yield {"config": initial_config, "service_state": initial_service_state}
-    finally:
-        # Restore to -default- state  (some might be redundant, but ensures clean state at exit)
-        call('service.update', 'nfs', {'enable': NFS_CONFIG.default_service_state['enable']})
-        state_cmd = {'RUNNING': 'start', 'STOPPED': 'stop'}
-        set_nfs_service_state(state_cmd[NFS_CONFIG.default_service_state['state']])
-
-        # Restore to -default- config
-        exclude = ['servers', 'v4_krb_enabled', 'v4_owner_major', 'keytab_has_nfs_spn', 'managed_nfsd']
-        default_config_payload = {k: v for k, v in NFS_CONFIG.default_config.items() if k not in exclude}
-        if NFS_CONFIG.default_config['managed_nfsd']:
-            default_config_payload['servers'] = None
-        call('nfs.update', default_config_payload)
-
-
-@pytest.fixture(scope="module")
-def nfs_dataset_and_share():
-    """ Will delete the 'nfs' share and dataset at the module exit """
-    with nfs_dataset('nfs') as ds:
-        with nfs_share(NFS_PATH, {
-                "comment": "My Test Share",
-                "security": ["SYS"]
-        }) as nfsid:
-            yield {"nfsid": nfsid, "ds": ds}
-
-
-@pytest.fixture(scope="class")
-def start_nfs():
-    """ The exit state is managed by init_nfs """
-    try:
-        yield set_nfs_service_state('start')
-    finally:
-        set_nfs_service_state('stop')
-
-
-# =====================================================================
-#                           Tests
-# =====================================================================
-
-def test_config(init_nfs):
-    initial_config = init_nfs['config']
-    initial_service_state = init_nfs['service_state']
-
-    # We should be starting with the default config
-    # Check the hard way so that we can identify the culprit
-    for k, v in NFS_CONFIG.default_config.items():
-        assert initial_config.get(k) == v, f'Expected {k}:"{v}", but found {k}:"{initial_config.get(k)}"'
-
-    # Confirm NFS is not running
-    assert initial_service_state['state'] == 'STOPPED', \
-        f"Before update, expected STOPPED, but found {initial_service_state['state']}"
-
-
-def test_service_enable_at_boot(init_nfs):
-    initial_run_state = init_nfs['service_state']
-    assert initial_run_state['enable'] is False
-
-    svc_id = call('service.update', 'nfs', {"enable": True})
-    nfs_state = call('service.query', [["id", "=", svc_id]])
-    assert nfs_state[0]['service'] == "nfs"
-    assert nfs_state[0]['enable'] is True
-
-
-def test_dataset_permissions(nfs_dataset_and_share):
-    ds = nfs_dataset_and_share["ds"]
-    call('filesystem.setperm', {
-        'path': os.path.join('/mnt', ds),
-        'mode': '777',
-        'uid': 0,
-        'gid': 0,
-    }, job=True)
-
-
-class TestNFSops:
-    """
-    Test NFS operations: server running
-    """
-    def test_state_directory(self, start_nfs):
-        """
-        By default, the NFS state directory is at /var/lib/nfs.
-        To support HA systems, we moved this to the system dataset
-        at /var/db/system/nfs.  In support of this, we updated the
-        NFS conf file settings.
-        """
-        assert start_nfs is True
-
-        # Make sure the conf file has the expected settings
-        sysds_path = call('systemdataset.sysdataset_path')
-        assert sysds_path == '/var/db/system'
-        nfs_state_dir = os.path.join(sysds_path, 'nfs')
-        s = parse_server_config()
-        assert s['exportd']['state-directory-path'] == nfs_state_dir, str(s)
-        assert s['nfsdcld']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcld'), str(s)
-        assert s['nfsdcltrack']['storagedir'] == os.path.join(nfs_state_dir, 'nfsdcltrack'), str(s)
-        assert s['mountd']['state-directory-path'] == nfs_state_dir, str(s)
-        assert s['statd']['state-directory-path'] == nfs_state_dir, str(s)
-
-        # Confirm we have the mount point in the system dataset
-        sysds = call('systemdataset.config')
-        bootds = call('systemdataset.get_system_dataset_spec', sysds['pool'], sysds['uuid'])
-        bootds_nfs = [d for d in bootds if 'nfs' in d.get('name')][0]
-        assert bootds_nfs['name'] == sysds['pool'] + "/.system/nfs"
-
-        # Confirm the required entries are present
-        required_nfs_entries = {"nfsdcld", "nfsdcltrack", "sm", "sm.bak", "state", "v4recovery"}
-        current_nfs_entries = set(ssh(f'ls {nfs_state_dir}').splitlines())
-        assert required_nfs_entries.issubset(current_nfs_entries)
-
-        # Confirm proc entry reports expected value after nfs restart
-        call('service.restart', 'nfs')
-        sleep(1)
-        recovery_dir = ssh('cat /proc/fs/nfsd/nfsv4recoverydir').strip()
-        assert recovery_dir == os.path.join(nfs_state_dir, 'v4recovery'), \
-            f"Expected {nfs_state_dir + '/v4recovery'} but found {recovery_dir}"
-        # ----------------------------------------------------------------------
-        # NOTE: Test fresh-install and upgrade.
-        # ----------------------------------------------------------------------
-
-    @pytest.mark.parametrize('vers', [3, 4])
-    def test_basic_nfs_ops(self, start_nfs, nfs_dataset_and_share, vers):
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-
-        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=vers, user=user,
-                     password=password, ip=truenas_server.ip) as n:
-            n.create('testfile')
-            n.mkdir('testdir')
-            contents = n.ls('.')
-            assert 'testdir' in contents
-            assert 'testfile' in contents
-
-            n.unlink('testfile')
-            n.rmdir('testdir')
-            contents = n.ls('.')
-            assert 'testdir' not in contents
-            assert 'testfile' not in contents
-
-    def test_server_side_copy(self, start_nfs, nfs_dataset_and_share):
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, user=user,
-                     password=password, ip=truenas_server.ip) as n:
-            n.server_side_copy('ssc1', 'ssc2')
-
-    @pytest.mark.parametrize('nfsd,cores,expected', [
-        pp(50, 1, {'nfsd': 50, 'mountd': 12, 'managed': False}, id="User set 50: expect 12 mountd"),
-        pp(None, 12, {'nfsd': 12, 'mountd': 3, 'managed': True}, id="12 cores: expect 12 nfsd, 3 mountd"),
-        pp(None, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="4 cores: expect 4 nfsd, 1 mountd"),
-        pp(None, 2, {'nfsd': 2, 'mountd': 1, 'managed': True}, id="2 cores: expect 2 nfsd, 1 mountd"),
-        pp(None, 1, {'nfsd': 1, 'mountd': 1, 'managed': True}, id="1 core: expect 1 nfsd, 1 mountd"),
-        pp(0, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="User set 0: invalid"),
-        pp(257, 4, {'nfsd': 4, 'mountd': 1, 'managed': True}, id="User set 257: invalid"),
-        pp(None, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}, id="48 cores: expect 32 nfsd (max), 8 mountd"),
-        pp(-1, 48, {'nfsd': 32, 'mountd': 8, 'managed': True}, id="Reset to 'managed_nfsd'"),
-    ])
-    def test_service_update(self, start_nfs, nfsd, cores, expected):
-        """
-        This test verifies that the service can be updated in general,
-        and also that the 'servers' key can be altered.
-        The latter goal is achieved by reading the nfs config file
-        and verifying that the value there was set correctly.
-
-        Update:
-        The default setting for 'servers' is None. This specifies that we dynamically
-        determine the number of nfsd to start based on the capabilities of the system.
-        In this state, we choose one nfsd for each CPU core.
-        The user can override the dynamic calculation by specifying a
-        number greater than zero.
-
-        The number of mountd will be 1/4 the number of nfsd.
-        """
-        assert start_nfs is True
-
-        with mock("system.cpu_info", return_value={"core_count": cores}):
-
-            # None and values in 1..256 are valid updates; other values exercise reset and error handling
-            if nfsd is None or nfsd in range(1, 257):
-                call("nfs.update", {"servers": nfsd})
-
-                s = parse_server_config()
-                assert int(s['nfsd']['threads']) == expected['nfsd'], str(s)
-                assert int(s['mountd']['threads']) == expected['mountd'], str(s)
-
-                confirm_nfsd_processes(expected['nfsd'])
-                confirm_mountd_processes(expected['mountd'])
-                confirm_rpc_processes()
-
-                # In all passing cases, the 'servers' field represents the number of expected nfsd
-                nfs_conf = call("nfs.config")
-                assert nfs_conf['servers'] == expected['nfsd']
-                assert nfs_conf['managed_nfsd'] == expected['managed']
-            else:
-                if nfsd == -1:
-                    # We know a priori that the current state is managed_nfsd == True
-                    with nfs_config():
-                        # Test making change to non-'server' setting does not change managed_nfsd
-                        assert call("nfs.config")['managed_nfsd'] == expected['managed']
-                else:
-                    with pytest.raises(ValidationErrors) as ve:
-                        assert call("nfs.config")['managed_nfsd'] == expected['managed']
-                        call("nfs.update", {"servers": nfsd})
-
-                    assert ve.value.errors == [ValidationError('nfs_update.servers', 'Should be between 1 and 256', 22)]
-
-    def test_share_update(self, start_nfs, nfs_dataset_and_share):
-        """
-        Test changing the security and enabled fields
-        We want nfs running to allow confirmation of changes in exportfs
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-        with nfs_share_config(nfsid) as share_data:
-            assert share_data['security'] != []
-            nfs_share = call('sharing.nfs.update', nfsid, {"security": [], "comment": "no comment"})
-
-            # The default is 'SYS', so changing from ['SYS'] to [] does not change /etc/exports
-            assert nfs_share['security'] == [], f"Expected [], but found {nfs_share['security']}"
-            assert nfs_share['comment'] == "no comment"
-
-            # Confirm changes are reflected in /etc/exports
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            export_opts = parsed[0]['opts'][0]['parameters']
-            assert "sec=sys" in export_opts
-
-            # Test share disable
-            assert share_data['enabled'] is True
-            nfs_share = call('sharing.nfs.update', nfsid, {"enabled": False})
-            assert parse_exports() == []
-
-    @pytest.mark.parametrize(
-        "networklist,ExpectedToPass,FailureMsg", [
-            # IPv4
-            pp(["192.168.0.0/24", "192.168.1.0/24"], True, "", id="IPv4 - non-overlap"),
-            pp(["192.168.0.0/16", "192.168.1.0/24"], False, "Overlapped", id="IPv4 - overlap wide"),
-            pp(["192.168.0.0/24", "192.168.0.211/32"], False, "Overlapped", id="IPv4 - overlap narrow"),
-            pp(["192.168.0.0/64"], False, "does not appear to be an IPv4 or IPv6 network", id="IPv4 - invalid range"),
-            pp(["bogus_network"], False, "does not appear to be an IPv4 or IPv6 network", id="IPv4 - invalid format"),
-            pp(["192.168.27.211"], True, "", id="IPv4 - auto-convert to CIDR"),
-            # IPv6
-            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/96"],
-               True, "", id="IPv6 - non-overlap"),
-            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2f::/88"],
-               False, "Overlapped", id="IPv6 - overlap wide"),
-            pp(["2001:0db8:85a3:0000:0000:8a2e::/96", "2001:0db8:85a3:0000:0000:8a2e:0370:7334/128"],
-               False, "Overlapped", id="IPv6 - overlap narrow"),
-            pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334/256"],
-               False, "does not appear to be an IPv4 or IPv6 network", id="IPv6 - invalid range"),
-            pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334"],
-               True, "", id="IPv6 - auto-convert to CIDR"),
-        ],
-    )
-    def test_share_networks(
-            self, start_nfs, nfs_dataset_and_share, networklist, ExpectedToPass, FailureMsg):
-        """
-        Verify that adding a network generates an appropriate line in the
-        exports file for the same path. Sample:
-
-        "/mnt/dozer/nfs"\
-            192.168.0.0/24(sec=sys,rw,subtree_check)\
-            192.168.1.0/24(sec=sys,rw,subtree_check)
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-
-        with nfs_share_config(nfsid):
-            if ExpectedToPass:
-                call('sharing.nfs.update', nfsid, {'networks': networklist})
-            else:
-                with pytest.raises(ValidationErrors) as re:
-                    call('sharing.nfs.update', nfsid, {'networks': networklist})
-                assert FailureMsg in str(re.value.errors[0])
-
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-
-            exports_networks = [x['host'] for x in parsed[0]['opts']]
-            if ExpectedToPass:
-                # The input is converted to CIDR format which often will
-                # look different from the input. e.g. 1.2.3.4/16 -> 1.2.0.0/16
-                cidr_list = [str(ipaddress.ip_network(x, strict=False)) for x in networklist]
-
-                # The entry should be present
-                diff = set(cidr_list) ^ set(exports_networks)
-                assert len(diff) == 0, f'diff: {diff}, exports: {parsed}'
-            else:
-                # The entry should NOT be present
-                assert len(exports_networks) == 1, str(parsed)
-
-    @pytest.mark.parametrize(
-        "hostlist,ExpectedToPass,FailureMsg", [
-            pp(["192.168.0.69", "192.168.0.70", "@fakenetgroup"],
-               True, "", id="Valid - IPv4 address, netgroup"),
-            pp(["asdfnm-*", "?-asdfnm-*", "asdfnm[0-9]", "nmix?-*dev[0-9]"],
-               True, "", id="Valid - wildcard names,ranges"),
-            pp(["asdfdm-*.example.com", "?-asdfdm-*.ixsystems.com",
-                "asdfdm[0-9].example.com", "dmix?-*dev[0-9].ixsystems.com"],
-               True, "", id="Valid - wildcard domains,ranges"),
-            pp(["-asdffail", "*.asdffail.com", "*.*.com", "bozofail.?.*"],
-               False, "Unable to resolve", id="Invalid - names,domains (not resolvable)"),
-            pp(["bogus/name"], False, "Unable to resolve", id="Invalid - name (path)"),
-            pp(["192.168.1.0/24"], False, "Unable to resolve", id="Invalid - name (network format)"),
-            pp(["asdfdm[0-9].example.com", "-asdffail", "devteam-*.ixsystems.com", "*.asdffail.com"],
-               False, "Unable to resolve", id="Mix - valid and invalid names"),
-            pp(["192.168.1.0", "192.168.1.0"], False, "not unique", id="Invalid - duplicate address"),
-            pp(["ixsystems.com", "ixsystems.com"], False, "not unique", id="Invalid - duplicate address"),
-            pp(["ixsystems.com", "*"], True, "", id="Valid - mix name and everybody"),
-            pp(["*", "*.ixsystems.com"], True, "", id="Valid - mix everybody and wildcard name"),
-            pp(["192.168.1.o"], False, "Unable to resolve", id="Invalid - character in address"),
-            pp(["bad host"], False, "cannot contain spaces", id="Invalid - name with spaces"),
-            pp(["2001:0db8:85a3:0000:0000:8a2e:0370:7334"], True, "", id="Valid - IPv6 address")
-        ],
-    )
-    def test_share_hosts(
-            self, start_nfs, nfs_dataset_and_share, hostlist, ExpectedToPass, FailureMsg):
-        """
-        Verify that adding hosts generates an appropriate line in the exports
-        file for the same path. Sample:
-
-        "/mnt/dozer/nfs"\
-            192.168.0.69(sec=sys,rw,subtree_check)\
-            192.168.0.70(sec=sys,rw,subtree_check)\
-            @fakenetgroup(sec=sys,rw,subtree_check)
-
-        host name handling in middleware:
-            If the host name contains no wildcard or special chars,
-                then we test it with a lookup
-            else we apply the host name rules and skip the lookup
-
-        The rules for the host field are:
-        - Dashes are allowed, but a level cannot start or end with a dash, '-'
-        - Only the leftmost level may contain special characters: '*', '?' and '[]'
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-
-        with nfs_share_config(nfsid):
-            if ExpectedToPass:
-                call('sharing.nfs.update', nfsid, {'hosts': hostlist})
-            else:
-                with pytest.raises(ValidationErrors) as re:
-                    call('sharing.nfs.update', nfsid, {'hosts': hostlist})
-                assert FailureMsg in str(re.value.errors[0])
-
-            # Check the exports file
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            exports_hosts = [x['host'] for x in parsed[0]['opts']]
-            if ExpectedToPass:
-                # The entry should be present
-                diff = set(hostlist) ^ set(exports_hosts)
-                assert len(diff) == 0, f'diff: {diff}, exports: {parsed}'
-            else:
-                # The entry should not be present
-                assert len(exports_hosts) == 1, str(parsed)
-
-    def test_share_ro(self, start_nfs, nfs_dataset_and_share):
-        """
-        Verify that toggling `ro` will cause appropriate change in
-        exports file. We also verify with write tests on a local mount.
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-
-        with nfs_share_config(nfsid) as share_data:
-            # Confirm 'rw' initial state and create a file and dir
-            assert share_data['ro'] is False
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            assert "rw" in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-            # Mount the share locally and create a file and dir
-            with SSH_NFS(truenas_server.ip, NFS_PATH,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                n.create("testfile_should_pass")
-                n.mkdir("testdir_should_pass")
-
-            # Change to 'ro'
-            call('sharing.nfs.update', nfsid, {'ro': True})
-
-            # Confirm 'ro' state and behavior
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            assert "rw" not in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-            # Attempt create and delete
-            with SSH_NFS(truenas_server.ip, NFS_PATH,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                with pytest.raises(RuntimeError) as re:
-                    n.create("testfile_should_fail")
-                    assert False, "Should not have been able to create a new file"
-                assert 'cannot touch' in str(re), re
-
-                with pytest.raises(RuntimeError) as re:
-                    n.mkdir("testdir_should_fail")
-                    assert False, "Should not have been able to create a new directory"
-                assert 'cannot create directory' in str(re), re
-
-    def test_share_maproot(self, start_nfs, nfs_dataset_and_share):
-        """
-        root squash is always enabled, so maproot is accomplished through
-        anonuid and anongid.
-
-        Sample:
-        "/mnt/dozer/NFSV4"\
-            *(sec=sys,rw,anonuid=65534,anongid=65534,subtree_check)
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-
-        with nfs_share_config(nfsid) as share_data:
-            # Confirm we won't compete against mapall
-            assert share_data['mapall_user'] is None
-            assert share_data['mapall_group'] is None
-
-            # Map root to everybody
-            call('sharing.nfs.update', nfsid, {
-                'maproot_user': 'nobody',
-                'maproot_group': 'nogroup'
-            })
-
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-
-            params = parsed[0]['opts'][0]['parameters']
-            assert 'anonuid=65534' in params, str(parsed)
-            assert 'anongid=65534' in params, str(parsed)
-            # TODO: Run test as nobody, expect success
-
-            # Setting maproot_user and maproot_group to root should
-            # cause us to append "no_root_squash" to options.
-            call('sharing.nfs.update', nfsid, {
-                'maproot_user': 'root',
-                'maproot_group': 'root'
-            })
-
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            params = parsed[0]['opts'][0]['parameters']
-            assert 'no_root_squash' in params, str(parsed)
-            assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
-            # TODO: Run test as nobody, expect failure
-
-            # Second share should have normal (no maproot) params.
-            second_share = f'/mnt/{pool_name}/second_share'
-            with nfs_dataset('second_share'):
-                with nfs_share(second_share):
-                    parsed = parse_exports()
-                    assert len(parsed) == 2, str(parsed)
-
-                    params = parsed[0]['opts'][0]['parameters']
-                    assert 'no_root_squash' in params, str(parsed)
-
-                    params = parsed[1]['opts'][0]['parameters']
-                    assert 'no_root_squash' not in params, str(parsed)
-                    assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
-
-        # After share config restore, confirm expected settings
-        parsed = parse_exports()
-        assert len(parsed) == 1, str(parsed)
-        params = parsed[0]['opts'][0]['parameters']
-
-        assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
-
-    def test_share_mapall(self, start_nfs, nfs_dataset_and_share):
-        """
-        mapall is accomplished through anonuid and anongid and
-        setting 'all_squash'.
-
-        Sample:
-        "/mnt/dozer/NFSV4"\
-            *(sec=sys,rw,all_squash,anonuid=65534,anongid=65534,subtree_check)
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        nfsid = nfs_dataset_and_share['nfsid']
-
-        with nfs_share_config(nfsid) as share_data:
-            # Confirm we won't compete against maproot
-            assert share_data['maproot_user'] is None
-            assert share_data['maproot_group'] is None
-
-            call('sharing.nfs.update', nfsid, {
-                'mapall_user': 'nobody',
-                'mapall_group': 'nogroup'
-            })
-
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-
-            params = parsed[0]['opts'][0]['parameters']
-            assert 'anonuid=65534' in params, str(parsed)
-            assert 'anongid=65534' in params, str(parsed)
-            assert 'all_squash' in params, str(parsed)
-
-        # After share config restore, confirm settings
-        parsed = parse_exports()
-        assert len(parsed) == 1, str(parsed)
-        params = parsed[0]['opts'][0]['parameters']
-
-        assert not any(filter(lambda x: x.startswith('anon'), params)), str(parsed)
-        assert 'all_squash' not in params, str(parsed)
-
-    def test_subtree_behavior(self, start_nfs, nfs_dataset_and_share):
-        """
-        If a dataset mountpoint is exported rather than a simple directory,
-        we disable subtree checking as an optimization. This check
-        makes sure we're doing this as expected:
-
-        Sample:
-        "/mnt/dozer/NFSV4"\
-            *(sec=sys,rw,no_subtree_check)
-        "/mnt/dozer/NFSV4/foobar"\
-            *(sec=sys,rw,subtree_check)
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-
-        with directory(f'{NFS_PATH}/sub1') as tmp_path:
-            with nfs_share(tmp_path, {'hosts': ['127.0.0.1']}):
-                parsed = parse_exports()
-                assert len(parsed) == 2, str(parsed)
-
-                assert parsed[0]['path'] == NFS_PATH, str(parsed)
-                assert 'no_subtree_check' in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-                assert parsed[1]['path'] == tmp_path, str(parsed)
-                assert 'subtree_check' in parsed[1]['opts'][0]['parameters'], str(parsed)
-
-    def test_nonroot_behavior(self, start_nfs, nfs_dataset_and_share):
-        """
-        If global configuration option "allow_nonroot" is set, then
-        we append "insecure" to each exports line.
-        Since this is a global option, it triggers an nfsd restart
-        even though it's not technically required.
-        Linux will, by default, mount using a privileged port (1..1023).
-        macOS NFS mounts do not follow this 'standard' behavior.
-
-        Four conditions to test:
-            server:  secure       (e.g. allow_nonroot is False)
-                client: resvport   -> expect to pass.
-                client: noresvport -> expect to fail.
-            server: insecure    (e.g. allow_nonroot is True)
-                client: resvport   -> expect to pass.
-                client: noresvport -> expect to pass
-
-        Sample:
-        "/mnt/dozer/NFSV4"\
-            *(sec=sys,rw,insecure,no_subtree_check)
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-
-        # Verify that NFS server configuration is as expected
-        with nfs_config() as nfs_conf_orig:
-
-            # --- Test: allow_nonroot is False ---
-            assert nfs_conf_orig['allow_nonroot'] is False, nfs_conf_orig
-
-            # Confirm setting in /etc/exports
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-            # Confirm we allow mounts from 'root' ports
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip):
-                client_port = get_client_nfs_port()
-                assert client_port[1] is not None, f"Failed to get client port: {client_port[0]}"
-                assert int(client_port[1]) < 1024, \
-                    f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}"
-
-            # Confirm we block mounts from 'non-root' ports
-            with pytest.raises(RuntimeError) as re:
-                with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'],
-                             user=user, password=password, ip=truenas_server.ip):
-                    pass
-                # We should not get to this assert
-                assert False, "Unexpected success with mount"
-            assert 'Operation not permitted' in str(re), re
-
-            # --- Test: allow_nonroot is True ---
-            new_nfs_conf = call('nfs.update', {"allow_nonroot": True})
-            assert new_nfs_conf['allow_nonroot'] is True, new_nfs_conf
-
-            parsed = parse_exports()
-            assert len(parsed) == 1, str(parsed)
-            assert 'insecure' in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-            # Confirm we allow mounts from 'root' ports
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip):
-                client_port = get_client_nfs_port()
-                assert client_port[1] is not None, "Failed to get client port"
-                assert int(client_port[1]) < 1024, \
-                    f"client_port is not in 'root' range: {client_port[1]}\n{client_port[0]}"
-
-            # Confirm we allow mounts from 'non-root' ports
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4, options=['noresvport'],
-                         user=user, password=password, ip=truenas_server.ip):
-                client_port = get_client_nfs_port()
-                assert client_port[1] is not None, "Failed to get client port"
-                assert int(client_port[1]) >= 1024, \
-                    f"client_port is not in 'non-root' range: {client_port[1]}\n{client_port[0]}"
-
-        # Confirm setting was returned to original state
-        parsed = parse_exports()
-        assert len(parsed) == 1, str(parsed)
-        assert 'insecure' not in parsed[0]['opts'][0]['parameters'], str(parsed)
-
-    def test_syslog_filters(self, start_nfs, nfs_dataset_and_share):
-        """
-        This test checks the function of the mountd_log setting, which filters
-        rpc.mountd messages with priorities from DEBUG through NOTICE.
-        We perform loopback mounts on the remote TrueNAS server and
-        then check the syslog for rpc.mountd messages.  Outside of SSH_NFS
-        we test the umount case.
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-        test_marker = random_string()
-
-        with nfs_config():
-
-            # The effect is much more clear if there are many mountd.
-            # We can force this by configuring many nfsd
-            call("nfs.update", {"servers": 24})
-
-            # Confirm default setting: mountd logging enabled
-            call("nfs.update", {"mountd_log": True})
-
-            # Add a marker to indicate the expectation of messages
-            ssh(f'logger "====== {test_marker} START_NFS_SYSLOG_FILTER_TEST ======"')
-
-            # Mount twice to generate plenty of messages
-            ssh('logger "mount once"')
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                n.ls('/')
-
-            ssh('logger "mount twice"')
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                n.ls('/')
-
-            # Disable mountd logging
-            call("nfs.update", {"mountd_log": False})
-
-            # Add a marker to indicate the expectation of no messages
-            ssh(f'logger "====== {test_marker} END_NFS_SYSLOG_FILTER_TEST ======"')
-
-            # Mount twice to generate plenty of opportunity for messages
-            ssh('logger "mount once"')
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                n.ls('/')
-
-            ssh('logger "mount twice"')
-            with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                         user=user, password=password, ip=truenas_server.ip) as n:
-                n.ls('/')
-
-            # Add a marker to indicate the end of the test
-            ssh(f'logger "====== {test_marker} STOP_NFS_SYSLOG_FILTER_TEST ======"')
-
-        # Wait a few seconds for messages to flush
-        sleep(5)
-
-        # Process syslog
-        log_data = ssh("tail -200 /var/log/syslog").replace('\n', '')
-        data_with_msg = re.findall(f"{test_marker} START.*{test_marker} END", log_data)[0]
-        assert 'rpc.mountd' in data_with_msg, data_with_msg
-        data_without_msg = re.findall(f"{test_marker} END.*{test_marker} STOP", log_data)[0]
-        assert 'rpc.mountd' not in data_without_msg
-
-    def test_client_status(self, start_nfs, nfs_dataset_and_share):
-        """
-        This test checks the function of API endpoints to list NFSv3 and
-        NFSv4 clients by performing loopback mounts on the remote TrueNAS
-        server and then checking client counts. Due to inherent imprecision
-        of counts over the NFSv3 protocol (specifically with regard to decrementing
-        sessions) we only verify that count is non-zero for NFSv3.
-        """
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-
-        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=3,
-                     user=user, password=password, ip=truenas_server.ip):
-            res = call('nfs.get_nfs3_clients', [], {'count': True})
-            assert int(res) != 0
-
-        with SSH_NFS(truenas_server.ip, NFS_PATH, vers=4,
-                     user=user, password=password, ip=truenas_server.ip):
-            res = call('nfs.get_nfs4_clients', [], {'count': True})
-            assert int(res) == 1, f"Expected to find 1, but found {int(res)}"
-
-        # # Enable this when CI environment supports IPv6
-        # # NAS-130437: Confirm IPv6 support
-        # try:
-        #     # Get the IPv6 equivalent of truenas_server.ip
-        #     ip_info = call(
-        #         'interface.query',
-        #         [["aliases.*.address", "=", truenas_server.ip]], {"get": True}
-        #     )
-        #     devname = ip_info['name']
-        #     aliases = ip_info['state']['aliases']
-
-        #     ipv6_addr = list(filter(lambda x: x['type'] == 'INET6', aliases))[0]['address']
-
-        #     ipv6_mp = '/mnt/nfs_ipv6'
-        #     ssh(f"mkdir -p {ipv6_mp}")
-
-        #     # zsh requires the 'server' part to be encapsulated in quotes due to square brackets
-        #     ssh(f'mount "[{ipv6_addr}%{devname}]":{NFS_PATH} {ipv6_mp}')
-
-        #     # Confirm we can process get_nfs4_clients that use IPv6 addresses
-        #     nfs4_client_list = call("nfs.get_nfs4_clients")
-        #     assert len(nfs4_client_list) == 1
-        #     assert ipv6_addr in nfs4_client_list[0]['info']['callback address']
-
-        # finally:
-        #     ssh(f"umount -f {ipv6_mp}")
-        #     ssh(f"rmdir {ipv6_mp}")
-
-    @pytest.mark.parametrize('type,data', [
-        pp('InvalidAssignment', [
-            {'maproot_user': 'baduser'}, 'maproot_user', 'User not found: baduser'
-        ], id="invalid maproot user"),
-        pp('InvalidAssignment', [
-            {'maproot_group': 'badgroup'}, 'maproot_user', 'This field is required when map group is specified'
-        ], id="invalid maproot group"),
-        pp('InvalidAssignment', [
-            {'mapall_user': 'baduser'}, 'mapall_user', 'User not found: baduser'
-        ], id="invalid mapall user"),
-        pp('InvalidAssignment', [
-            {'mapall_group': 'badgroup'}, 'mapall_user', 'This field is required when map group is specified'
-        ], id="invalid mapall group"),
-        pp('MissingUser', ['maproot_user', 'missinguser'], id="missing maproot user"),
-        pp('MissingUser', ['mapall_user', 'missinguser'], id="missing mapall user"),
-        pp('MissingGroup', ['maproot_group', 'missingroup'], id="missing maproot group"),
-        pp('MissingGroup', ['mapall_group', 'missingroup'], id="missing mapall group"),
-    ])
-    def test_invalid_user_group_mapping(self, start_nfs, nfs_dataset_and_share, type, data):
-        '''
-        Verify we properly trap and handle invalid user and group mapping
-        Two conditions:
-            1) Catch invalid assignments
-            2) Catch invalid settings at NFS start
-        '''
-        assert start_nfs is True
-        assert nfs_dataset_and_share['nfsid'] is not None
-
-        ''' Test Processing '''
-        with directory(f'{NFS_PATH}/sub1') as tmp_path:
-
-            if type == 'InvalidAssignment':
-                payload = {'path': tmp_path} | data[0]
-                with pytest.raises(ValidationErrors) as ve:
-                    call("sharing.nfs.create", payload)
-                assert ve.value.errors == [ValidationError(f'sharingnfs_create.{data[1]}', data[2], 22)]
-
-            elif type == 'MissingUser':
-                usrname = data[1]
-                testkey, testval = data[0].split('_')
-
-                usr_payload = {'username': usrname, 'full_name': usrname,
-                               'group_create': True, 'password': 'abadpassword'}
-                mapping = {data[0]: usrname}
-                with create_user(usr_payload) as usrInst:
-                    with nfs_share(tmp_path, mapping) as share:
-                        run_missing_usrgrp_mapping_test(data, testval, tmp_path, share, usrInst)
-
-            elif type == 'MissingGroup':
-                # Use a built-in user for the group test
-                grpname = data[1]
-                testkey, testval = data[0].split('_')
-
-                mapping = {f"{testkey}_user": 'ftp', data[0]: grpname}
-                with create_group({'name': grpname}) as grpInst:
-                    with nfs_share(tmp_path, mapping) as share:
-                        run_missing_usrgrp_mapping_test(data, testval, tmp_path, share, grpInst)
-
-    def test_service_protocols(self, start_nfs):
-        """
-        This test verifies that changing the `protocols` option generates expected
-        changes in nfs kernel server config.  In most cases we will also confirm
-        the settings have taken effect.
-
-        For the time being this test will also exercise the deprecated `v4` option
-        to the same effect, but this will later be removed.
-
-        NFS must be enabled for this test to succeed: the config (i.e. the
-        database) is updated regardless, but the server config file is not
-        updated unless the service is enabled.
-        TODO: Add client side tests
-        """
-        assert start_nfs is True
-
-        # Multiple restarts cause systemd failures.  Reset the systemd counters.
-        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")
-
-        with nfs_config() as nfs_conf_orig:
-            # Check existing config (both NFSv3 & NFSv4 configured)
-            assert "NFSV3" in nfs_conf_orig['protocols'], nfs_conf_orig
-            assert "NFSV4" in nfs_conf_orig['protocols'], nfs_conf_orig
-            s = parse_server_config()
-            assert s['nfsd']["vers3"] == 'y', str(s)
-            assert s['nfsd']["vers4"] == 'y', str(s)
-            confirm_nfs_version(['3', '4'])
-
-            # Turn off NFSv4 (v3 on)
-            new_config = call('nfs.update', {"protocols": ["NFSV3"]})
-            assert "NFSV3" in new_config['protocols'], new_config
-            assert "NFSV4" not in new_config['protocols'], new_config
-            s = parse_server_config()
-            assert s['nfsd']["vers3"] == 'y', str(s)
-            assert s['nfsd']["vers4"] == 'n', str(s)
-
-            # Confirm setting has taken effect: v4->off, v3->on
-            confirm_nfs_version(['3'])
-
-            # Confirm we trap invalid setting
-            with pytest.raises(ValidationError) as ve:
-                call("nfs.update", {"protocols": []})
-            assert "nfs_update.protocols" == ve.value.attribute
-            assert "at least one" in str(ve.value)
-
-            # Turn off NFSv3 (v4 on)
-            new_config = call('nfs.update', {"protocols": ["NFSV4"]})
-            assert "NFSV3" not in new_config['protocols'], new_config
-            assert "NFSV4" in new_config['protocols'], new_config
-            s = parse_server_config()
-            assert s['nfsd']["vers3"] == 'n', str(s)
-            assert s['nfsd']["vers4"] == 'y', str(s)
-
-            # Confirm setting has taken effect: v4->on, v3->off
-            confirm_nfs_version(['4'])
-
-        # Finally, confirm both are re-enabled
-        nfs_conf = call('nfs.config')
-        assert "NFSV3" in nfs_conf['protocols'], nfs_conf
-        assert "NFSV4" in nfs_conf['protocols'], nfs_conf
-        s = parse_server_config()
-        assert s['nfsd']["vers3"] == 'y', str(s)
-        assert s['nfsd']["vers4"] == 'y', str(s)
-
-        # Confirm setting has taken effect: v4->on, v3->on
-        confirm_nfs_version(['3', '4'])
-
-    def test_service_udp(self, start_nfs):
-        """
-        This test verifies the udp config is NOT in the DB and
-        that it is NOT in the etc file.
-        """
-        assert start_nfs is True
-
-        # The 'udp' setting should have been removed
-        nfs_conf = call('nfs.config')
-        assert nfs_conf.get('udp') is None, nfs_conf
-
-        s = parse_server_config()
-        assert s.get('nfsd', {}).get('udp') is None, s
-
-    @pytest.mark.parametrize('test_port', [
-        pp([["mountd", 618, None], ["rpcstatd", 871, None], ["rpclockd", 32803, None]], id="valid ports"),
-        pp([["rpcstatd", -21, 0], ["rpclockd", 328031, 0]], id="invalid ports"),
-        pp([["mountd", 20049, 1]], id="excluded ports"),
-    ])
-    def test_service_ports(self, start_nfs, test_port):
-        """
-        This test verifies that we can set custom ports and that the
-        settings are reflected in the relevant files and are active.
-        This also tests the port range and exclude.
-        """
-        assert start_nfs is True
-        # Multiple restarts cause systemd failures.  Reset the systemd counters.
-        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")
-
-        # Friendly index names
-        name = 0
-        value = 1
-        err = 2
-
-        # Error message snippets
-        errmsg = ["Should be between", "reserved for internal use"]
-
-        # Test ports
-        for port in test_port:
-            port_name = port[name] + "_port"
-            if port[err] is None:
-                nfs_conf = call("nfs.update", {port_name: port[value]})
-                assert nfs_conf[port_name] == port[value]
-            else:
-                with pytest.raises(ValidationErrors) as ve:
-                    nfs_conf = call("nfs.update", {port_name: port[value]})
-                errStr = str(ve.value.errors[0])
-                assert errmsg[port[err]] in errStr
-
-        # Compare DB with setting in /etc/nfs.conf.d/local.conf
-        with nfs_config() as config_db:
-            s = parse_server_config()
-            assert int(s['mountd']['port']) == config_db["mountd_port"], str(s)
-            assert int(s['statd']['port']) == config_db["rpcstatd_port"], str(s)
-            assert int(s['lockd']['port']) == config_db["rpclockd_port"], str(s)
-
-            # Confirm port settings are active
-            confirm_rpc_port('mountd', config_db["mountd_port"])
-            confirm_rpc_port('status', config_db["rpcstatd_port"])
-            confirm_rpc_port('nlockmgr', config_db["rpclockd_port"])
-
-    def test_runtime_debug(self, start_nfs):
-        """
-        This validates that the private NFS debugging API works correctly.
-        """
-        assert start_nfs is True
-        disabled = {"NFS": ["NONE"], "NFSD": ["NONE"], "NLM": ["NONE"], "RPC": ["NONE"]}
-        enabled = {"NFS": ["PROC", "XDR", "CLIENT", "MOUNT", "XATTR_CACHE"],
-                   "NFSD": ["ALL"],
-                   "NLM": ["CLIENT", "CLNTLOCK", "SVC"],
-                   "RPC": ["CALL", "NFS", "TRANS"]}
-        failure = {"RPC": ["CALL", "NFS", "TRANS", "NONE"]}
-        try:
-            res = call('nfs.get_debug')
-            assert res == disabled
-
-            call('nfs.set_debug', enabled)
-            res = call('nfs.get_debug')
-            assert set(res['NFS']) == set(enabled['NFS']), f"Mismatch on NFS: {res}"
-            assert set(res['NFSD']) == set(enabled['NFSD']), f"Mismatch on NFSD: {res}"
-            assert set(res['NLM']) == set(enabled['NLM']), f"Mismatch on NLM: {res}"
-            assert set(res['RPC']) == set(enabled['RPC']), f"Mismatch on RPC: {res}"
-
-            # Test failure case.  This should generate a ValueError exception on the system
-            with pytest.raises(ValueError) as ve:
-                call('nfs.set_debug', failure)
-            assert 'Cannot specify another value' in str(ve), ve
-
-        finally:
-            call('nfs.set_debug', disabled)
-            res = call('nfs.get_debug')
-            assert res == disabled
-
-    def test_bind_ip(self, start_nfs):
-        '''
-        This test requires a static IP address
-        * Test the private nfs.bindip call
-        * Test the actual bindip config setting
-        - Confirm setting in conf files
-        - Confirm service on IP address
-        '''
-        assert start_nfs is True
-
-        # Multiple restarts cause systemd failures.  Reset the systemd counters.
-        reset_svcs("nfs-idmapd nfs-mountd nfs-server rpcbind rpc-statd")
-
-        choices = call("nfs.bindip_choices")
-        assert truenas_server.ip in choices
-
-        call("nfs.bindip", {"bindip": [truenas_server.ip]})
-        call("nfs.bindip", {"bindip": []})
-
-        # Test config with bindip.  Use choices from above
-        # TODO: check with 'nmap -sT <IP>' from the runner.
-        with nfs_config() as db_conf:
-
-            # Should have no bindip setting
-            nfs_conf = parse_server_config()
-            rpc_conf = parse_rpcbind_config()
-            assert db_conf['bindip'] == []
-            assert nfs_conf['nfsd'].get('host') is None
-            assert rpc_conf.get('-h') is None
-
-            # Set bindip
-            call("nfs.update", {"bindip": [truenas_server.ip]})
-
-            # Confirm we see it in the nfs and rpc conf files
-            nfs_conf = parse_server_config()
-            rpc_conf = parse_rpcbind_config()
-            assert truenas_server.ip in nfs_conf['nfsd'].get('host'), f"nfs_conf = {nfs_conf}"
-            assert truenas_server.ip in rpc_conf.get('-h'), f"rpc_conf = {rpc_conf}"
-
-    def test_v4_domain(self, start_nfs):
-        '''
-        The v4_domain configuration item maps to the 'Domain' setting in
-        the [General] section of /etc/idmapd.conf.
-        It is described as:
-            The local NFSv4 domain name. An NFSv4 domain is a namespace
-            with a unique username<->UID and groupname<->GID mapping.
-            (Default: Host's fully-qualified DNS domain name)
-        '''
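-        # Expected shape of /etc/idmapd.conf after the update below (sketch):
-        #   [General]
-        #   Domain = ixsystems.com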
-        assert start_nfs is True
-
-        with nfs_config() as nfs_db:
-            # By default, v4_domain is not set
-            assert nfs_db['v4_domain'] == "", f"Expected zero-len string, but found {nfs_db['v4_domain']}"
-            s = parse_server_config("idmapd")
-            assert s['General'].get('Domain') is None, f"'Domain' was not expected to be set: {s}"
-
-            # Make a setting change and confirm
-            db = call('nfs.update', {"v4_domain": "ixsystems.com"})
-            assert db['v4_domain'] == 'ixsystems.com', f"v4_domain failed to be updated in nfs DB: {db}"
-            s = parse_server_config("idmapd")
-            assert s['General'].get('Domain') == 'ixsystems.com', f"'Domain' failed to be updated in idmapd.conf: {s}"
-
-    def test_xattr_support(self, start_nfs):
-        """
-        Perform basic validation of NFSv4.2 xattr support.
-        Mount path via NFS 4.2, create a file and dir,
-        and write + read xattr on each.
-        """
-        assert start_nfs is True
-
-        xattr_nfs_path = f'/mnt/{pool_name}/test_nfs4_xattr'
-        with nfs_dataset("test_nfs4_xattr"):
-            with nfs_share(xattr_nfs_path):
-                with SSH_NFS(truenas_server.ip, xattr_nfs_path, vers=4.2,
-                             user=user, password=password, ip=truenas_server.ip) as n:
-                    n.create("testfile")
-                    n.setxattr("testfile", "user.testxattr", "the_contents")
-                    xattr_val = n.getxattr("testfile", "user.testxattr")
-                    assert xattr_val == "the_contents"
-
-                    n.create("testdir", True)
-                    n.setxattr("testdir", "user.testxattr2", "the_contents2")
-                    xattr_val = n.getxattr("testdir", "user.testxattr2")
-                    assert xattr_val == "the_contents2"
-
-    class TestSubtreeShares:
-        """
-        Wrap a class around the subtree share tests so that the fixture is
-        called only once for the parametrized test
-        """
-
-        # TODO: Work up a valid IPv6 test (when CI environment supports it)
-        # res = SSH_TEST(f"ip address show {interface} | grep inet6", user, password, ip)
-        # ipv6_network = str(res['output'].split()[1])
-        # ipv6_host = ipv6_network.split('/')[0]
-
-        @pytest.fixture(scope='class')
-        def dataset_and_dirs(self):
-            """
-            Create a dataset and an NFS share for it for host 127.0.0.1 only
-            In the dataset, create directories: dir1, dir2, dir3
-            In each directory, create subdirs: subdir1, subdir2, subdir3
-            """
-
-            # Characteristics of expected error messages
-            err_strs = [
-                ["Another share", "same path"],
-                ["This or another", "overlaps"],
-                ["Another NFS share already exports"],
-                ["Symbolic links"]
-            ]
-
-            vol0 = f'/mnt/{pool_name}/VOL0'
-            with nfs_dataset('VOL0'):
-                # Top level shared to narrow host
-                with nfs_share(vol0, {'hosts': ['127.0.0.1']}):
-                    # Get the initial list of entries for the cleanup test
-                    contents = call('sharing.nfs.query')
-                    startIdList = [item.get('id') for item in contents]
-
-                    # Create the dirs
-                    dirs = ["everybody_1", "everybody_2", "limited_1", "dir_1", "dir_2"]
-                    subdirs = ["subdir1", "subdir2", "subdir3"]
-                    try:
-                        for dir in dirs:
-                            ssh(f"mkdir -p {vol0}/{dir}")
-                            for subdir in subdirs:
-                                ssh(f"mkdir -p {vol0}/{dir}/{subdir}")
-                                # And symlinks
-                                ssh(f"ln -sf {vol0}/{dir}/{subdir} {vol0}/{dir}/symlink2{subdir}")
-
-                        yield vol0, err_strs
-                    finally:
-                        # Remove the created dirs
-                        for dir in dirs:
-                            ssh(f"rm -rf {vol0}/{dir}")
-
-                        # Remove the created shares
-                        contents = call('sharing.nfs.query')
-                        endIdList = [item.get('id') for item in contents]
-                        [call('sharing.nfs.delete', id) for id in endIdList if id not in startIdList]
-
-        @pytest.mark.parametrize(
-            "dirname,isHost,HostOrNet,ExpectedToPass, ErrFormat", [
-                pp("everybody_1", True, ["*"], True, None, id="NAS-120957: host - everybody"),
-                pp("everybody_2", True, ["*"], True, None, id="NAS-120957: host - non-related paths"),
-                pp("everybody_2", False, ["192.168.1.0/22"], True, None, id="NAS-129577: network, everybody, same path"),
-                pp("limited_1", True, ["127.0.0.1"], True, None, id="NAS-123042: host - export subdirs"),
-                pp("limited_1", False, ["192.168.1.0/22"], True, None, id="NAS-123042: network - export subdirs"),
-                pp("limited_1", True, ["127.0.0.1"], False, 0, id="NAS-127220: host - already exported"),
-                pp("limited_1", False, ["192.168.1.0/22"], False, 2, id="NAS-127220: network - already exported"),
-                pp("dir_1", True, ["*.example.com"], True, None, id="NAS-120616: host - wildcards"),
-                pp("dir_1", True, ["*.example.com"], False, 0, id="NAS-127220: host - wildcard already exported"),
-                pp("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e::/96"],
-                   True, None, id="NAS-123042: network - IPv6 network range"),
-                pp("dir_1/subdir2", True, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334"],
-                   True, None, id="NAS-129577: host - IPv6 allow host overlap with network"),
-                pp("dir_1/subdir2", False, ["2001:0db8:85a3:0000:0000:8a2e:0370:7334/112"],
-                   False, 1, id="NAS-123042: network - IPv6 overlap with network"),
-                pp("dir_1/subdir3", True, ["192.168.27.211"], True, None, id="NAS-123042: host - export sub-subdirs"),
-                pp("dir_1/subdir3", False, ["192.168.24.0/22"],
-                   True, None, id="NAS-129522: network - allow overlap with host"),
-                pp("limited_1/subdir2", True, ["*"], True, None, id="NAS-123042: host - setup everybody on sub-subdir"),
-                pp("limited_1/subdir2", True, ["*"], False, 2, id="NAS-127220: host - already exported sub-subdir"),
-                pp("dir_2/subdir2", False, ["192.168.1.0/24"],
-                   True, None, id="NAS-123042: network - export sub-subdirs"),
-                pp("dir_2/subdir2", False, ["192.168.1.0/32"], False, 1, id="NAS-123042: network - overlap sub-subdir"),
-                pp("limited_1/subdir3", True, ["192.168.1.0", "*.ixsystems.com"],
-                   True, None, id="NAS-123042: host - two hosts, same sub-subdir"),
-                pp("dir_1/symlink2subdir3", True, ["192.168.0.0"], False, 3, id="Block exporting symlinks"),
-            ],
-        )
-        def test_subtree_share(self, start_nfs, dataset_and_dirs, dirname, isHost, HostOrNet, ExpectedToPass, ErrFormat):
-            """
-            Sharing subtrees to the same host can cause problems for
-            NFSv3.  This check makes sure a share creation follows
-            the rules.
-                * First match is applied
-                * A new path that is _the same_ as an existing path cannot be shared to the same 'host'
-
-            For example, the following is not allowed:
-            "/mnt/dozer/NFS"\
-                fred(rw)
-            "/mnt/dozer/NFS"\
-                fred(ro)
-
-            Also not allowed are collisions that may result in unexpected share permissions.
-            For example, the following is not allowed:
-            "/mnt/dozer/NFS"\
-                *(rw)
-            "/mnt/dozer/NFS"\
-                marketing(ro)
-            """
-            assert start_nfs is True
-
-            vol, err_strs = dataset_and_dirs
-            dirpath = f'{vol}/{dirname}'
-            if isHost:
-                payload = {"path": dirpath, "hosts": HostOrNet}
-            else:
-                payload = {"path": dirpath, "networks": HostOrNet}
-
-            if ExpectedToPass:
-                call("sharing.nfs.create", payload)
-            else:
-                with pytest.raises(ValidationErrors) as ve:
-                    call("sharing.nfs.create", payload)
-                errStr = str(ve.value.errors[0])
-                # Confirm we have the expected error message format
-                for this_substr in err_strs[ErrFormat]:
-                    assert this_substr in errStr
-
-    @pytest.mark.timeout(600)
-    def test_nfsv4_acl_support(self, start_nfs):
-        """
-        This test validates reading and setting NFSv4 ACLs through an NFSv4
-        mount in the following manner for NFSv4.2, NFSv4.1 & NFSv4.0:
-        1) Create and locally mount an NFSv4 share on the TrueNAS server
-        2) Iterate through all possible permissions options and set them
-        via an NFS client, read back through NFS client, and read resulting
-        ACL through the filesystem API.
-        3) Repeat same process for each of the supported ACE flags.
-        4) For NFSv4.1 or NFSv4.2, repeat same process for each of the
-        supported acl_flags.
-        """
-        assert start_nfs is True
-
-        acl_nfs_path = f'/mnt/{pool_name}/test_nfs4_acl'
-        test_perms = {
-            "READ_DATA": True,
-            "WRITE_DATA": True,
-            "EXECUTE": True,
-            "APPEND_DATA": True,
-            "DELETE_CHILD": True,
-            "DELETE": True,
-            "READ_ATTRIBUTES": True,
-            "WRITE_ATTRIBUTES": True,
-            "READ_NAMED_ATTRS": True,
-            "WRITE_NAMED_ATTRS": True,
-            "READ_ACL": True,
-            "WRITE_ACL": True,
-            "WRITE_OWNER": True,
-            "SYNCHRONIZE": True
-        }
-        test_flags = {
-            "FILE_INHERIT": True,
-            "DIRECTORY_INHERIT": True,
-            "INHERIT_ONLY": False,
-            "NO_PROPAGATE_INHERIT": False,
-            "INHERITED": False
-        }
-        # getacl setting
-        simplified = True
-        for (version, test_acl_flag) in [(4, True), (4.1, True), (4.0, False)]:
-            theacl = [
-                {"tag": "owner@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
-                {"tag": "group@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
-                {"tag": "everyone@", "id": -1, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
-                {"tag": "USER", "id": 65534, "perms": test_perms, "flags": test_flags, "type": "ALLOW"},
-                {"tag": "GROUP", "id": 666, "perms": test_perms.copy(), "flags": test_flags.copy(), "type": "ALLOW"},
-            ]
-            with nfs_dataset("test_nfs4_acl", {"acltype": "NFSV4", "aclmode": "PASSTHROUGH"}, theacl):
-                with nfs_share(acl_nfs_path):
-                    with SSH_NFS(truenas_server.ip, acl_nfs_path, vers=version, user=user, password=password, ip=truenas_server.ip) as n:
-                        nfsacl = n.getacl(".")
-                        for idx, ace in enumerate(nfsacl):
-                            assert ace == theacl[idx], str(ace)
-
-                        for perm in test_perms.keys():
-                            if perm == 'SYNCHRONIZE':
-                                # stop at SYNCHRONIZE due to a limitation in the Linux NFS tools
-                                break
-
-                            theacl[4]['perms'][perm] = False
-                            n.setacl(".", theacl)
-                            nfsacl = n.getacl(".")
-                            for idx, ace in enumerate(nfsacl):
-                                assert ace == theacl[idx], str(ace)
-
-                            result = call('filesystem.getacl', acl_nfs_path, not simplified)
-                            for idx, ace in enumerate(result['acl']):
-                                assert ace == {**nfsacl[idx], "who": None}, str(ace)
-
-                        for flag in ("INHERIT_ONLY", "NO_PROPAGATE_INHERIT"):
-                            theacl[4]['flags'][flag] = True
-                            n.setacl(".", theacl)
-                            nfsacl = n.getacl(".")
-                            for idx, ace in enumerate(nfsacl):
-                                assert ace == theacl[idx], str(ace)
-
-                            result = call('filesystem.getacl', acl_nfs_path, not simplified)
-                            for idx, ace in enumerate(result['acl']):
-                                assert ace == {**nfsacl[idx], "who": None}, str(ace)
-
-                        if test_acl_flag:
-                            assert 'none' == n.getaclflag(".")
-                            for acl_flag in ['auto-inherit', 'protected', 'defaulted']:
-                                n.setaclflag(".", acl_flag)
-                                assert acl_flag == n.getaclflag(".")
-
-                                result = call('filesystem.getacl', acl_nfs_path, not simplified)
-
-                                # Normalize the flag_is_set name for comparison to the plugin equivalent
-                                # (just remove the '-' from auto-inherit)
-                                if acl_flag == 'auto-inherit':
-                                    flag_is_set = 'autoinherit'
-                                else:
-                                    flag_is_set = acl_flag
-
-                                # Now ensure that only the expected flag is set
-                                nfs41_flags = result['aclflags']
-                                for flag in ['autoinherit', 'protected', 'defaulted']:
-                                    if flag == flag_is_set:
-                                        assert nfs41_flags[flag], nfs41_flags
-                                    else:
-                                        assert not nfs41_flags[flag], nfs41_flags
-
-    @pytest.mark.parametrize('state,expected', [
-        pp(None, 'n', id="default state"),
-        pp(True, 'y', id="enable"),
-        pp(False, 'n', id="disable")
-    ])
-    def test_manage_gids(self, start_nfs, state, expected):
-        '''
-        The nfsd_manage_gids setting is called "Support > 16 groups" in the webui.
-        It does that, but more broadly it determines which GIDs are used for permission checks.
-
-        If NOT enabled, then the expectation is that the groups to which the user belongs
-        are defined on the _client_ and NOT the server.  It also means groups to which the user
-        belongs are passed in on the NFS commands from the client.  The file object GID is
-        checked against the passed in list of GIDs.  This is also where the 16 group
-        limitation is enforced.  The NFS protocol allows passing up to 16 groups per user.
-
-        If nfsd_manage_gids is enabled, the groups to which the user belongs are defined
-        on the server.  In this condition, the server confirms the user is a member of
-        the file object GID.
-
-        NAS-126067:  Debian changed the 'default' setting for manage_gids in /etc/nfs.conf
-        from undefined to "manage_gids = y".
-
-        TEST:   Confirm manage_gids is set in /etc/nfs.conf.d/local/conf for
-                both the enable and disable states
-
-        TODO: Add client-side and server-side test from client when available
-        '''
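-        # A minimal sketch, assuming the rendered config looks roughly like:
-        #   [mountd]
-        #   manage-gids = y
-        # parse_server_config() below should then expose the value as
-        # s['mountd']['manage-gids'].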
-        assert start_nfs is True
-        with nfs_config():
-
-            if state is not None:
-                sleep(3)  # In Cobia: Prevent restarting NFS too quickly.
-                call("nfs.update", {"userd_manage_gids": state})
-
-            s = parse_server_config()
-            assert s['mountd']['manage-gids'] == expected, str(s)
-
-    def test_rdma_config(self, start_nfs):
-        '''
-        Mock response from rdma.capable_protocols to confirm NFS over RDMA config setting
-        '''
-        assert start_nfs is True
-
-        # Confirm the setting does not exist by default
-        s = parse_server_config()
-        assert s.get('rdma') is None, str(s)
-
-        # RDMA setting should fail on a test vm.
-        with pytest.raises(ValidationErrors) as ve:
-            call("nfs.update", {"rdma": True})
-        assert ve.value.errors == [
-            ValidationError(
-                'nfs_update.rdma',
-                'This platform cannot support NFS over RDMA or is missing an RDMA capable NIC.',
-                22
-            )
-        ]
-
-        with mock("rdma.capable_protocols", return_value=['NFS']):
-            with nfs_config():
-                call("nfs.update", {"rdma": True})
-                s = parse_server_config()
-                assert s['nfsd']['rdma'] == 'y', str(s)
-                # 20049 is the default port for NFS over RDMA.
-                assert s['nfsd']['rdma-port'] == '20049', str(s)
-
-
-def test_pool_delete_with_attached_share():
-    '''
-    Confirm we can delete a pool with the system dataset and a dataset with active NFS shares
-    '''
-    with another_pool() as new_pool:
-        # Move the system dataset to this pool
-        with system_dataset(new_pool['name']):
-            # Add some additional NFS stuff to make it interesting
-            with nfs_dataset("deleteme", pool=new_pool['name']) as ds:
-                with nfs_share(f"/mnt/{ds}"):
-                    with manage_start_nfs():
-                        # Delete the pool and confirm it's gone
-                        call("pool.export", new_pool["id"], {"destroy": True}, job=True)
-                        assert call("pool.query", [["name", "=", f"{new_pool['name']}"]]) == []
-
-
-def test_threadpool_mode():
-    '''
-    Verify that NFS thread pool configuration can be adjusted through private API endpoints.
-
-    NOTE: This request will fail if NFS server (or NFS client) is still running.
-    '''
-    assert get_nfs_service_state() == "STOPPED", "NFS cannot be running during this test."
-    default_mode = call('nfs.get_threadpool_mode')
-
-    supported_modes = ["AUTO", "PERCPU", "PERNODE", "GLOBAL"]
-    try:
-        for m in supported_modes:
-            call('nfs.set_threadpool_mode', m)
-            res = call('nfs.get_threadpool_mode')
-            assert res == m, res
-    finally:
-        # Restore to default
-        call('nfs.set_threadpool_mode', default_mode)
-
-
-@pytest.mark.parametrize('exports', ['missing', 'empty'])
-def test_missing_or_empty_exports(exports):
-    '''
-    NAS-123498: Eliminate conditions on exports for service start
-    The goal is to make the NFS server behavior similar to the other protocols
-    '''
-    # Setup /etc/exports
-    if exports == 'empty':
-        ssh("echo '' > /etc/exports")
-    else:  # 'missing'
-        ssh("rm -f /etc/exports")
-
-    with nfs_config() as nfs_conf:
-        try:
-            # Start NFS
-            call('service.start', 'nfs')
-            sleep(1)
-            confirm_nfsd_processes(nfs_conf['servers'])
-        finally:
-            # Return NFS to stopped condition
-            call('service.stop', 'nfs')
-            sleep(1)
-
-    # Confirm stopped
-    assert get_nfs_service_state() == "STOPPED"
-
-
-@pytest.mark.parametrize('expect_NFS_start', [False, True])
-def test_files_in_exportsd(expect_NFS_start):
-    '''
-    Any files in /etc/exports.d are potentially dangerous, especially zfs.exports.
-    We implemented protections against rogue exports files.
-    - We block starting NFS if there are any files in /etc/exports.d
-    - We generate an alert when we detect this condition
-    - We clear the alert when /etc/exports.d is empty
-    '''
-    fail_check = {False: 'ConditionDirectoryNotEmpty=!/etc/exports.d', True: None}
-
-    try:
-        # Setup the test
-        set_immutable_state('/etc/exports.d', want_immutable=False)  # Disable immutable
-
-        # Do the 'failing' case first to end with a clean condition
-        if not expect_NFS_start:
-            ssh("echo 'bogus data' > /etc/exports.d/persistent.file")
-            ssh("chattr +i /etc/exports.d/persistent.file")
-        else:
-            # Restore /etc/exports.d directory to a clean state
-            ssh("chattr -i /etc/exports.d/persistent.file")
-            ssh("rm -rf /etc/exports.d/*")
-
-        set_immutable_state('/etc/exports.d', want_immutable=True)  # Enable immutable
-
-        set_nfs_service_state('start', expect_NFS_start, fail_check[expect_NFS_start])
-
-    finally:
-        # In all cases we want to end with NFS stopped
-        set_nfs_service_state('stop')
-
-        # If NFS start is blocked, then an alert should have been raised
-        alerts = call('alert.list')
-        if not expect_NFS_start:
-            # Find alert
-            assert any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts
-        else:  # Alert should have been cleared
-            assert not any(alert["klass"] == "NFSblockedByExportsDir" for alert in alerts), alerts
diff --git a/tests/api2/test_310_service_announcement.py b/tests/api2/test_310_service_announcement.py
deleted file mode 100644
index 97a45599459c9..0000000000000
--- a/tests/api2/test_310_service_announcement.py
+++ /dev/null
@@ -1,479 +0,0 @@
-import contextlib
-import random
-import re
-import socket
-import string
-from datetime import datetime, timedelta
-from time import sleep
-from typing import cast
-
-import pytest
-from assets.websocket.server import reboot
-from assets.websocket.service import (ensure_service_disabled,
-                                      ensure_service_enabled,
-                                      ensure_service_started,
-                                      ensure_service_stopped)
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from pytest_dependency import depends
-from zeroconf import ServiceBrowser, ServiceStateChange, Zeroconf
-
-from auto_config import ha, password, pool_name, user
-from functions import SSH_TEST
-from protocols import smb_share
-
-digits = ''.join(random.choices(string.digits, k=4))
-dataset_name = f"smb-cifs{digits}"
-SMB_NAME1 = f"TestCifsSMB{digits}"
-SMB_PATH1 = f"/mnt/{pool_name}/{dataset_name}"
-
-dataset_name2 = f"other{digits}"
-SMB_NAME2 = f"OtherTestSMB{digits}"
-SMB_PATH2 = f"/mnt/{pool_name}/{dataset_name2}"
-
-# Service names
-TIME_MACHINE = '_adisk._tcp.local.'  # Automatic Disk
-DEVICE_INFO = '_device-info._tcp.local.'  # Device Info
-HTTP = '_http._tcp.local.'
-SMB = '_smb._tcp.local.'
-NUT = '_nut._tcp'
-
-DO_MDNS_REBOOT_TEST = False
-USE_AVAHI_BROWSE = True
-skip_avahi_browse_tests = pytest.mark.skipif(USE_AVAHI_BROWSE, reason="Skip tests broken by use of avahi-browse")
-
-
-def _get_tm_props(rec, key):
-    result = {}
-    for pair in rec['properties'][key].decode('utf-8').split(','):
-        k, v = pair.split('=')
-        result[k] = v
-    return result
-
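-# For illustration (hypothetical TXT values): an _adisk._tcp record packs each
-# time-machine share into a dkN property of comma-separated k=v pairs, so
-#   _get_tm_props({'properties': {b'dk0': b'adVN=TestShare,adVF=0x82'}}, b'dk0')
-# would return {'adVN': 'TestShare', 'adVF': '0x82'}.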
-
-def allow_settle(delay=3):
-    # Delay slightly to allow things to propagate
-    sleep(delay)
-
-
-@contextlib.contextmanager
-def service_announcement_config(config):
-    if not config:
-        yield
-    else:
-        old_config = call('network.configuration.config')['service_announcement']
-        call('network.configuration.update', {'service_announcement': config})
-        try:
-            yield
-        finally:
-            call('network.configuration.update', {'service_announcement': old_config})
-
-
-@contextlib.contextmanager
-def ensure_aapl_extensions():
-    # First check
-    enabled = call('smb.config')['aapl_extensions']
-    if enabled:
-        yield
-    else:
-        call('smb.update', {'aapl_extensions': True})
-        try:
-            yield
-        finally:
-            call('smb.update', {'aapl_extensions': False})
-
-
-def wait_for_avahi_startup(interval=5, timeout=300):
-    """When tests are running in a QE environment it can take a long
-    time for the service to start up completely, because many systems
-    can be configured with the same hostname.
-
-    This function will detect the most recent avahi-daemon startup and
-    wait for it to complete"""
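-    # For illustration (hypothetical journal lines), the loop below matches output
-    # such as:
-    #   Jan 01 12:00:00 truenas avahi-daemon[1234]: avahi-daemon 0.8 starting up.
-    #   Jan 01 12:00:03 truenas avahi-daemon[1234]: Server startup complete.
-    # and extracts the pid from the brackets to pair a startup line with its
-    # completion message.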
-    command = 'journalctl --no-pager -u avahi-daemon --since "10 minute ago"'
-    brackets = re.compile(r'[\[\]]+')
-    while timeout > 0:
-        startup = None
-        ssh_out = SSH_TEST(command, user, password)
-        assert ssh_out['result'], str(ssh_out)
-        output = ssh_out['output']
-        # First we just look for the most recent startup command
-        for line in output.split('\n'):
-            if line.endswith('starting up.'):
-                startup = line
-        if startup:
-            pid = brackets.split(startup)[1]
-            completion = f'avahi-daemon[{pid}]: Server startup complete.'
-            for line in output.split('\n'):
-                if completion in line:
-                    # Did startup complete just now?
-                    finish_plus_five = (datetime.strptime(line.split()[2], "%H:%M:%S") + timedelta(seconds=5)).time()
-                    if finish_plus_five > datetime.now().time():
-                        # Wait 5 seconds to ensure services are published
-                        sleep(5)
-                    return True
-        sleep(interval)
-        timeout -= interval
-    return False
-
-
-class ZeroconfCollector:
-
-    def on_service_state_change(self, zeroconf, service_type, name, state_change):
-
-        if state_change is ServiceStateChange.Added:
-            info = zeroconf.get_service_info(service_type, name)
-            if info:
-                item = {}
-                item['addresses'] = [addr for addr in info.parsed_scoped_addresses()]
-                if self.ip not in item['addresses']:
-                    return
-                item['port'] = cast(int, info.port)
-                item['server'] = info.server
-                if info.properties:
-                    item['properties'] = {}
-                    for key, value in info.properties.items():
-                        if key:
-                            item['properties'][key] = value
-                else:
-                    item['properties'] = {}
-                self.result[service_type][name] = item
-                self.update_internal_hostname(item['server'])
-
-    def find_items(self, service_announcement=None, timeout=5):
-        self.result = {}
-        for service in self.SERVICES:
-            self.result[service] = {}
-        with service_announcement_config(service_announcement):
-            assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup"
-            zeroconf = Zeroconf()
-            ServiceBrowser(zeroconf, self.SERVICES, handlers=[self.on_service_state_change])
-            try:
-                sleep(timeout)
-            finally:
-                zeroconf.close()
-        return self.result
-
-    def clear_cache(self):
-        # No-op for zeroconf collector
-        pass
-
-
-class AvahiBrowserCollector:
-
-    name_to_service = {
-        'Device Info': DEVICE_INFO,
-        'Web Site': HTTP,
-        'Microsoft Windows Network': SMB,
-        'Apple TimeMachine': TIME_MACHINE,
-        '_nut._tcp': NUT,
-    }
-
-    def find_items(self, service_announcement=None, timeout=5):
-        self.result = {}
-        for service in self.SERVICES:
-            self.result[service] = {}
-        with service_announcement_config(service_announcement):
-            assert wait_for_avahi_startup(), "Failed to detect avahi-daemon startup"
-            # ssh_out = SSH_TEST("avahi-browse -v --all -t -p --resolve", user, password)
-            # It appears we sometimes need a little more time
-            ssh_out = SSH_TEST("timeout --preserve-status 5 avahi-browse -v --all -p --resolve", user, password)
-            assert ssh_out['result'], str(ssh_out)
-            output = ssh_out['output']
-            for line in output.split('\n'):
-                item = {}
-                items = line.split(';')
-                if len(items) > 1 and items[0] == '=':
-                    if len(items) == 10:
-                        server = items[3]
-                        pub_ip = items[7]
-                        if pub_ip not in self.ips:
-                            continue
-                        item['addresses'] = [pub_ip]
-                        item['port'] = items[8]
-                        item['server'] = items[6]
-                        service_type = AvahiBrowserCollector.name_to_service[items[4]]
-                        key = f"{server}.{service_type}"
-                        item['properties'] = self.process_properties(items[9], service_type)
-                        self.result[service_type][key] = item
-                        self.update_internal_hostname(item['server'])
-        return self.result
-
-    def process_properties(self, txts, service_type):
-        props = {}
-        for txt in txts.split():
-            if txt.startswith('"') and txt.endswith('"'):
-                txt = txt[1:-1]
-                for prop in ['model', 'dk0', 'dk1', 'sys']:
-                    if txt.startswith(f"{prop}="):
-                        props[prop.encode('utf-8')] = txt[len(prop) + 1:].encode('utf-8')
-        return props
-
-    def clear_cache(self):
-        # We need to restart the avahi-daemon to clear cache
-        # print("Clearing cache")
-        ssh("systemctl restart avahi-daemon")
-
-    @staticmethod
-    def get_ipv6(ip):
-        """Given an IPv4 address string, find the IPv6 on the same
-        interface (if present).  Returns either the IPv6 address as
-        a string, or None"""
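-        # Sketch of the assumed summary shape (hypothetical addresses):
-        #   {'ips': {'eth0': {'IPV4': ['192.168.0.5/24'], 'IPV6': ['fe80::1/64']}}}
-        # in which case get_ipv6('192.168.0.5') would return 'fe80::1'.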
-        ips = call('network.general.summary')['ips']
-        for interface in ips:
-            matched = False
-            if 'IPV4' in ips[interface]:
-                for ipv4 in ips[interface]['IPV4']:
-                    if ipv4.split('/')[0] == ip:
-                        matched = True
-                        break
-            if matched and 'IPV6' in ips[interface]:
-                for ipv6 in ips[interface]['IPV6']:
-                    return ipv6.split('/')[0]
-        return None
-
-
-class abstractmDNSAnnounceCollector:
-    """
-    Class to help in the discovery (and processing/checking)
-    of services advertised by a particular IP address/server name.
-    """
-    SERVICES = [TIME_MACHINE, DEVICE_INFO, HTTP, SMB, NUT]
-
-    def __init__(self, ip, tn_hostname):
-        self.ip = socket.gethostbyname(ip)
-        self.hostname = self.tn_hostname = tn_hostname
-
-    def update_internal_hostname(self, published_hostname):
-        """If there has been a conflict then it is possible that a derivative
-        of the original hostname is being used.  Check whether the
-        published name could be a conflict-resolved name and if so,
-        update the hostname that will be used during checks.
-        """
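-        # For example (hypothetical names): with tn_hostname 'truenas', a published
-        # 'truenas-2.local.' or 'truenas #2.local.' resolves back to 'truenas' and
-        # self.hostname is updated to the conflict-resolved name.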
-        if published_hostname == self.tn_hostname:
-            return
-        possible_new_hostname = published_hostname.split('.')[0]
-        if possible_new_hostname == self.hostname:
-            return
-        # Check whether either 'hostname-...' or '<hostname> #...'
-        if possible_new_hostname.split()[0].split('-')[0] == self.tn_hostname:
-            self.hostname = possible_new_hostname
-
-    def has_service_type(self, hostname, service_type):
-        if not hostname:
-            hostname = self.hostname
-        key = f"{hostname}.{service_type}"
-        return key in self.result[service_type]
-
-    def get_service_type(self, hostname, service_type):
-        if not hostname:
-            hostname = self.hostname
-        key = f"{hostname}.{service_type}"
-        if key in self.result[service_type]:
-            return self.result[service_type][key]
-
-    def has_time_machine(self, hostname=None):
-        return self.has_service_type(hostname, TIME_MACHINE)
-
-    def has_device_info(self, hostname=None):
-        return self.has_service_type(hostname, DEVICE_INFO)
-
-    def has_http(self, hostname=None):
-        return self.has_service_type(hostname, HTTP)
-
-    def has_smb(self, hostname=None):
-        return self.has_service_type(hostname, SMB)
-
-    def time_machine(self, hostname=None):
-        return self.get_service_type(hostname, TIME_MACHINE)
-
-    def check_present(self, device_info=True, http=True, smb=True, time_machine=True, hostname=None):
-        assert self.has_device_info(hostname) == device_info, self.result[DEVICE_INFO]
-        assert self.has_http(hostname) == http, self.result[HTTP]
-        assert self.has_smb(hostname) == smb, self.result[SMB]
-        assert self.has_time_machine(hostname) == time_machine, self.result[TIME_MACHINE]
-
-
-if USE_AVAHI_BROWSE:
-    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, AvahiBrowserCollector):
-        def __init__(self, ip, tn_hostname):
-            abstractmDNSAnnounceCollector.__init__(self, ip, tn_hostname)
-            # avahi-browse can report either an IPv4 address or the
-            # corresponding IPv6 address if configured on the same interface,
-            # so we will expand our inclusion check to encompass both.
-            ipv6 = AvahiBrowserCollector.get_ipv6(self.ip)
-            if ipv6:
-                self.ips = [self.ip, ipv6]
-            else:
-                self.ips = [self.ip]
-else:
-    class mDNSAnnounceCollector(abstractmDNSAnnounceCollector, ZeroconfCollector):
-        pass
-
-
-@pytest.fixture(autouse=True, scope="module")
-def setup_environment():
-    try:
-        with ensure_service_disabled('cifs'):
-            with ensure_service_stopped('cifs'):
-                yield
-    finally:
-        pass
-
-
-@pytest.mark.timeout(600)
-@pytest.mark.dependency(name="servann_001")
-def test_001_initial_config(request):
-    """Ensure that the service announcement configuration is as expected."""
-    global current_hostname
-
-    network_config = call('network.configuration.config')
-    sa = network_config['service_announcement']
-    if ha:
-        current_hostname = network_config['hostname_virtual']
-    else:
-        current_hostname = network_config['hostname']
-    # At the moment we only care about mdns
-    assert sa['mdns'] is True, sa
-
-    # Let's restart avahi (in case we've updated middleware)
-    call('service.restart', 'mdns')
-    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-    ac.find_items()
-    ac.check_present(smb=False, time_machine=False)
-
-
-# This test is broken by the use of avahi-browse because, when it is
-# called, it re-activates the avahi-daemon by means of
-# avahi-daemon.socket.
-# The DEV and HTTP service files have NOT been deleted upon
-# a service stop, so this reactivation causes the test to
-# fail.
-# Since the test passes when run with the zeroconf library on
-# a suitably connected test-runner, there is no real need to chase this.
-@pytest.mark.timeout(600)
-@skip_avahi_browse_tests
-def test_002_mdns_disabled(request):
-    depends(request, ["servann_001"], scope="session")
-    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-    ac.clear_cache()
-    ac.find_items({'mdns': False, 'wsd': True, 'netbios': False})
-    ac.check_present(False, False, False, False)
-
-
-# Setting a VERY long timeout because, when this test is run in isolation
-# on jenkins, there can be many (20+) hostname clashes, which means
-# avahi can take a LONG time to settle down/start up.
-#
-# We could avoid this by setting a unique hostname (as is done during a
-# full test run), but it also seems a worthwhile exercise to be able
-# to test in such an unsuitable environment.
-@pytest.mark.timeout(900)
-def test_003_mdns_smb_share(request):
-    """Perform some mDNS tests wrt SMB and ADISK services."""
-    depends(request, ["servann_001"], scope="session")
-
-    # SMB is not started originally
-    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-    ac.find_items()
-    ac.check_present(smb=False, time_machine=False)
-
-    with dataset(dataset_name):
-        with smb_share(SMB_PATH1, {'name': SMB_NAME1, 'comment': 'Test SMB Share'}):
-            # SMB is still not started
-            ac.find_items()
-            ac.check_present(smb=False, time_machine=False)
-            with ensure_service_started('cifs'):
-                allow_settle()
-                ac.find_items()
-                ac.check_present(time_machine=False)
-            # OK, SMB is stopped again. Ensure we don't advertise SMB anymore
-            ac.clear_cache()
-            ac.find_items()
-            ac.check_present(smb=False, time_machine=False)
-
-        # Now we're going to set up a time machine share
-        with ensure_aapl_extensions():
-            with ensure_service_started('cifs'):
-                allow_settle()
-                # Check mDNS before we have a time machine share
-                ac.find_items()
-                ac.check_present(time_machine=False)
-                with smb_share(SMB_PATH1, {'name': SMB_NAME1,
-                                           'comment': 'Basic TM SMB Share',
-                                           'purpose': 'TIMEMACHINE'}) as shareID1:
-                    allow_settle()
-                    # Check mDNS now we have a time machine share
-                    ac.find_items()
-                    ac.check_present()
-
-                    # Now read the share details and then check against what mDNS reported
-                    share1 = call('sharing.smb.query', [['id', '=', shareID1]])[0]
-
-                    tm = ac.time_machine()
-                    props = _get_tm_props(tm, b'dk0')
-                    assert props['adVN'] == SMB_NAME1, props
-                    assert props['adVF'] == '0x82', props
-                    assert props['adVU'] == share1['vuid'], props
-                    # Now make another time machine share
-                    with dataset(dataset_name2):
-                        with smb_share(SMB_PATH2, {'name': SMB_NAME2,
-                                                   'comment': 'Multiuser TM SMB Share',
-                                                   'purpose': 'ENHANCED_TIMEMACHINE'}) as shareID2:
-                            share2 = call('sharing.smb.query', [['id', '=', shareID2]])[0]
-                            allow_settle()
-                            ac.find_items()
-                            ac.check_present()
-                            tm = ac.time_machine()
-                            props0 = _get_tm_props(tm, b'dk0')
-                            props1 = _get_tm_props(tm, b'dk1')
-                            assert props0['adVF'] == '0x82', props0
-                            assert props1['adVF'] == '0x82', props1
-                            # Let's not make any assumption about which share is which
-                            if props0['adVN'] == SMB_NAME1:
-                                # SHARE 1 in props0
-                                assert props0['adVU'] == share1['vuid'], props0
-                                # SHARE 2 in props1
-                                assert props1['adVN'] == SMB_NAME2, props1
-                                assert props1['adVU'] == share2['vuid'], props1
-                            else:
-                                # SHARE 1 in props1
-                                assert props1['adVN'] == SMB_NAME1, props1
-                                assert props1['adVU'] == share1['vuid'], props1
-                                # SHARE 2 in props0
-                                assert props0['adVN'] == SMB_NAME2, props0
-                                assert props0['adVU'] == share2['vuid'], props0
-                    # Still have one TM share
-                    allow_settle()
-                    ac.find_items()
-                    ac.check_present()
-
-                # Check mDNS now we no longer have a time machine share
-                ac.clear_cache()
-                ac.find_items()
-                ac.check_present(time_machine=False)
-            # Finally check when SMB is stopped again
-            ac.clear_cache()
-            ac.find_items()
-            ac.check_present(smb=False, time_machine=False)
-
-
-if DO_MDNS_REBOOT_TEST:
-    def test_004_reboot_with_mdns_smb_share(request):
-        """Create a time-machine SMB share and check that it is published
-        following a reboot."""
-        depends(request, ["servann_001"], scope="session")
-
-        # First let's set up a time machine share
-        with dataset(dataset_name):
-            with smb_share(SMB_PATH1, {'name': SMB_NAME1,
-                                       'comment': 'Basic TM SMB Share',
-                                       'purpose': 'TIMEMACHINE'}):
-                with ensure_service_enabled('cifs'):
-                    # Next reboot and then check the expected services
-                    # are advertised.
-                    reboot(truenas_server.ip, 'cifs')
-                    ac = mDNSAnnounceCollector(truenas_server.ip, current_hostname)
-                    ac.find_items()
-                    ac.check_present()
diff --git a/tests/api2/test_344_acl_templates.py b/tests/api2/test_344_acl_templates.py
deleted file mode 100644
index 1832fa45920b8..0000000000000
--- a/tests/api2/test_344_acl_templates.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import pytest
-from contextlib import contextmanager
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import dataset as make_dataset
-
-
-@pytest.fixture(scope='module')
-def acltemplate_ds():
-    """
-    Setup of datasets for testing templates.
-    This test shouldn't fail unless pool.dataset endpoint is
-    thoroughly broken.
-    """
-    with make_dataset('acltemplate_posix', data={
-        'acltype': 'POSIX',
-        'aclmode': 'DISCARD'
-    }) as posix_ds:
-        with make_dataset('acltemplate_nfsv4', data={
-            'acltype': 'NFSV4',
-            'aclmode': 'PASSTHROUGH'
-        }) as nfsv4_ds:
-            yield {'POSIX': posix_ds, 'NFSV4': nfsv4_ds}
-
-
-@contextmanager
-def create_entry_type(acltype):
-    entry = call('filesystem.acltemplate.query', [['name', '=', f'{acltype}_RESTRICTED']], {'get': True})
-    acl = entry['acl']
-
-    payload = {
-        'name': f'{acltype}_TEST',
-        'acl': acl,
-        'acltype': entry['acltype']
-    }
-
-    template = call('filesystem.acltemplate.create', payload)
-
-    try:
-        yield template
-    finally:
-        call('filesystem.acltemplate.delete', template['id'])
-
-    # Verify actually deleted
-    assert call('filesystem.acltemplate.query', [['name', '=', f'{acltype}_TEST']]) == []
-
-
-@pytest.fixture(scope='function')
-def tmp_posix_entry():
-    with create_entry_type('POSIX') as entry:
-        yield entry
-
-
-@pytest.fixture(scope='function')
-def tmp_nfs_entry():
-    with create_entry_type('NFS4') as entry:
-        yield entry
-
-
-@pytest.fixture(scope='function')
-def tmp_acltemplates(tmp_posix_entry, tmp_nfs_entry):
-    yield {'POSIX': tmp_posix_entry, 'NFSV4': tmp_nfs_entry}
-
-
-def dataset_path(data, acltype):
-    return os.path.join('/mnt', data[acltype])
-
-
-@pytest.mark.parametrize('acltype', ['NFSV4', 'POSIX'])
-def test_check_builtin_types_by_path(acltemplate_ds, acltype):
-    """
-    This test verifies that we can query builtins by paths, and
-    that the acltype of the builtins matches that of the
-    underlying path.
-    """
-    expected_acltype = 'POSIX1E' if acltype == 'POSIX' else 'NFS4'
-    payload = {'path': dataset_path(acltemplate_ds, acltype)}
-    for entry in call('filesystem.acltemplate.by_path', payload):
-        assert entry['builtin'], str(entry)
-        assert entry['acltype'] == expected_acltype, str(entry)
-
-    payload['format-options'] = {'resolve_names': True, 'ensure_builtins': True}
-    for entry in call('filesystem.acltemplate.by_path', payload):
-        for ace in entry['acl']:
-            if ace['tag'] not in ('USER_OBJ', 'GROUP_OBJ', 'USER', 'GROUP'):
-                continue
-
-            assert ace.get('who') is not None, str(ace)
-
-
-@pytest.mark.parametrize('acltype', ['NFSV4', 'POSIX'])
-def test_update_new_template(tmp_acltemplates, acltype):
-    """
-    Rename the template we created to validate that the `update`
-    method works.
-    """
-    # shallow copy is sufficient since we're not changing nested values
-    payload = tmp_acltemplates[acltype].copy()
-
-    template_id = payload.pop('id')
-    payload.pop('builtin')
-    orig_name = payload.pop('name')
-
-    payload['name'] = f'{orig_name}2'
-
-    result = call('filesystem.acltemplate.update', template_id, payload)
-    assert result['name'] == payload['name']
-
-
-def test_knownfail_builtin_delete(request):
-    builtin_templ = call('filesystem.acltemplate.query', [['builtin', '=', True]], {'get': True})
-
-    with pytest.raises(Exception):
-        call('filesystem.acltemplate.delete', builtin_templ['id'])
-
-
-def test_knownfail_builtin_update(request):
-    payload = call('filesystem.acltemplate.query', [['builtin', '=', True]], {'get': True})
-
-    tmpl_id = payload.pop('id')
-    payload.pop('builtin')
-    payload['name'] = 'CANARY'
-
-    with pytest.raises(Exception):
-        call('filesystem.acltemplate.update', tmpl_id, payload)
diff --git a/tests/api2/test_345_acl_nfs4.py b/tests/api2/test_345_acl_nfs4.py
deleted file mode 100644
index b0b1fd6244994..0000000000000
--- a/tests/api2/test_345_acl_nfs4.py
+++ /dev/null
@@ -1,938 +0,0 @@
-import secrets
-import string
-import os
-
-import pytest
-from pytest_dependency import depends
-
-from auto_config import pool_name
-from functions import SSH_TEST
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user as create_user
-from middlewared.test.integration.assets.pool import dataset as make_dataset
-from middlewared.test.integration.utils import call, ssh
-
-
-shell = '/usr/bin/bash'
-group = 'nogroup'
-ACLTEST_DATASET_NAME = 'acltest'
-ACLTEST_DATASET = f'{pool_name}/{ACLTEST_DATASET_NAME}'
-dataset_url = ACLTEST_DATASET.replace('/', '%2F')
-
-ACLTEST_SUBDATASET = f'{pool_name}/acltest/sub1'
-getfaclcmd = "nfs4xdr_getfacl"
-setfaclcmd = "nfs4xdr_setfacl"
-group0 = "root"
-
-ACL_USER = 'acluser'
-ACL_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-base_permset = {
-    "READ_DATA": False,
-    "WRITE_DATA": False,
-    "APPEND_DATA": False,
-    "READ_NAMED_ATTRS": False,
-    "WRITE_NAMED_ATTRS": False,
-    "EXECUTE": False,
-    "DELETE_CHILD": False,
-    "READ_ATTRIBUTES": False,
-    "WRITE_ATTRIBUTES": False,
-    "DELETE": False,
-    "READ_ACL": False,
-    "WRITE_ACL": False,
-    "WRITE_OWNER": False,
-    "SYNCHRONIZE": True
-}
-
-base_flagset = {
-    "FILE_INHERIT": False,
-    "DIRECTORY_INHERIT": False,
-    "NO_PROPAGATE_INHERIT": False,
-    "INHERIT_ONLY": False,
-    "INHERITED": False
-}
-
-BASIC_PERMS = ["READ", "TRAVERSE", "MODIFY", "FULL_CONTROL"]
-BASIC_FLAGS = ["INHERIT", "NOINHERIT"]
-TEST_FLAGS = [
-    'DIRECTORY_INHERIT',
-    'FILE_INHERIT',
-    'INHERIT_ONLY',
-    'NO_PROPAGATE_INHERIT'
-]
-
-INHERIT_FLAGS_BASIC = {
-    "FILE_INHERIT": True,
-    "DIRECTORY_INHERIT": True,
-    "NO_PROPAGATE_INHERIT": False,
-    "INHERIT_ONLY": False,
-    "INHERITED": False
-}
-
-INHERIT_FLAGS_ADVANCED = {
-    "FILE_INHERIT": True,
-    "DIRECTORY_INHERIT": True,
-    "NO_PROPAGATE_INHERIT": True,
-    "INHERIT_ONLY": True,
-    "INHERITED": False
-}
-
-default_acl = [
-    {
-        "tag": "owner@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    },
-    {
-        "tag": "group@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    }
-]
-
-function_testing_acl_deny = [
-    {
-        "tag": "owner@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    },
-    {
-        "tag": "group@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    },
-    {
-        "tag": "everyone@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    },
-]
-
-function_testing_acl_allow = [
-    {
-        "tag": "owner@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    },
-    {
-        "tag": "group@",
-        "id": None,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    }
-]
-
-# base64-encoded samba DOSATTRIB xattr
-DOSATTRIB_XATTR = "CTB4MTAAAAMAAwAAABEAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABimX3sSqfTAQAAAAAAAAAACg=="
-
-IMPLEMENTED_DENY = [
-    "WRITE_ATTRIBUTES",
-    "DELETE",
-    "DELETE_CHILD",
-    "FULL_DELETE",
-    "EXECUTE",
-    "READ_DATA",
-    "WRITE_DATA",
-    "READ_ACL",
-    "WRITE_ACL",
-    "WRITE_OWNER",
-]
-
-IMPLEMENTED_ALLOW = [
-    "READ_DATA",
-    "WRITE_DATA",
-    "DELETE",
-    "DELETE_CHILD",
-    "EXECUTE",
-    "WRITE_OWNER",
-    "READ_ACL",
-    "WRITE_ACL",
-]
-
-TEST_INFO = {}
-
-
-@pytest.fixture(scope='module')
-def initialize_for_acl_tests(request):
-    with make_dataset(ACLTEST_DATASET_NAME, data={'acltype': 'NFSV4', 'aclmode': 'RESTRICTED'}) as ds:
-        with create_user({
-            'username': ACL_USER,
-            'full_name': ACL_USER,
-            'group_create': True,
-            'ssh_password_enabled': True,
-            'password': ACL_PWD
-        }) as u:
-            TEST_INFO.update({
-                'dataset': ds,
-                'dataset_path': os.path.join('/mnt', ds),
-                'user': u
-            })
-            yield request
-
-
-@pytest.mark.dependency(name='HAS_NFS4_ACLS')
-def test_02_create_dataset(initialize_for_acl_tests):
-    acl = call('filesystem.getacl', TEST_INFO['dataset_path'])
-    assert acl['acltype'] == 'NFS4'
-
-
-def test_04_basic_set_acl_for_dataset(request):
-    depends(request, ["HAS_NFS4_ACLS"])
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'uid': 65534,
-        'gid': 65534,
-        'dacl': default_acl
-    }, job=True)
-
-    acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'],  True)
-    for key in ['tag', 'type', 'perms', 'flags']:
-        assert acl_result['acl'][0][key] == default_acl[0][key], str(acl_result)
-        assert acl_result['acl'][1][key] == default_acl[1][key], str(acl_result)
-
-    assert acl_result['uid'] == 65534, str(acl_result)
-
-
-"""
-At this point, very basic functionality of the API endpoint is verified.
-Proceed to more rigorous testing of basic and advanced permissions.
-These tests will only manipulate the first entry in the default ACL (owner@).
-Each test will iterate through all available options for that particular
-variation (BASIC/ADVANCED permissions, BASIC/ADVANCED flags).
-"""
-
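-# For illustration: a BASIC permset is the shorthand {'BASIC': 'FULL_CONTROL'},
-# while an ADVANCED permset spells out every bit, e.g. dict(base_permset,
-# READ_DATA=True); the same BASIC/ADVANCED split applies to the flags field.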
-
-@pytest.mark.parametrize('permset', BASIC_PERMS)
-def test_08_set_basic_permsets(request, permset):
-    depends(request, ["HAS_NFS4_ACLS"])
-    default_acl[0]['perms']['BASIC'] = permset
-
-    call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True)
-    acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True)
-    requested_perms = default_acl[0]['perms']
-    received_perms = acl_result['acl'][0]['perms']
-    assert requested_perms == received_perms, str(acl_result)
-
-
-@pytest.mark.parametrize('flagset', BASIC_FLAGS)
-def test_09_set_basic_flagsets(request, flagset):
-    depends(request, ["HAS_NFS4_ACLS"])
-    default_acl[0]['flags']['BASIC'] = flagset
-
-    call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True)
-    acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True)
-    requested_flags = default_acl[0]['flags']
-    received_flags = acl_result['acl'][0]['flags']
-    assert requested_flags == received_flags, str(acl_result)
-
-
-@pytest.mark.parametrize('perm', base_permset.keys())
-def test_10_set_advanced_permset(request, perm):
-    depends(request, ["HAS_NFS4_ACLS"])
-    for key in ['perms', 'flags']:
-        if default_acl[0][key].get('BASIC'):
-            default_acl[0][key].pop('BASIC')
-
-    default_acl[0]['flags'] = base_flagset.copy()
-    default_acl[0]['perms'] = base_permset.copy()
-    default_acl[0]['perms'][perm] = True
-
-    call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True)
-    acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True)
-    requested_perms = default_acl[0]['perms']
-    received_perms = acl_result['acl'][0]['perms']
-    assert requested_perms == received_perms, str(acl_result)
-
-
-@pytest.mark.parametrize('flag', TEST_FLAGS)
-def test_11_set_advanced_flagset(request, flag):
-    depends(request, ["HAS_NFS4_ACLS"])
-    default_acl[0]['flags'] = base_flagset.copy()
-    default_acl[0]['flags'][flag] = True
-    if flag in ['INHERIT_ONLY', 'NO_PROPAGATE_INHERIT']:
-        default_acl[0]['flags']['DIRECTORY_INHERIT'] = True
-
-    call('filesystem.setacl', {'path': TEST_INFO['dataset_path'], 'dacl': default_acl}, job=True)
-    acl_result = call('filesystem.getacl', TEST_INFO['dataset_path'], True)
-    requested_flags = default_acl[0]['flags']
-    received_flags = acl_result['acl'][0]['flags']
-    assert requested_flags == received_flags, str(acl_result)
-
-
-"""
-This next series of tests verifies that ACLs are being inherited correctly.
-We first create a child dataset to verify that ACLs do not change unless
-'traverse' is set.
-"""
-
-
-def test_12_prepare_recursive_tests(request):
-    depends(request, ["HAS_NFS4_ACLS"], scope="session")
-    call('pool.dataset.create', {'name': ACLTEST_SUBDATASET, 'acltype': 'NFSV4'})
-
-    ssh(';'.join([
-        f'mkdir -p /mnt/{ACLTEST_DATASET}/dir1/dir2',
-        f'touch /mnt/{ACLTEST_DATASET}/dir1/testfile',
-        f'touch /mnt/{ACLTEST_DATASET}/dir1/dir2/testfile'
-    ]))
-
-
-def test_13_recursive_no_traverse(request):
-    depends(request, ["HAS_NFS4_ACLS"])
-    default_acl[1]['perms'].pop('BASIC')
-    default_acl[1]['flags'].pop('BASIC')
-    default_acl[0]['flags'] = INHERIT_FLAGS_BASIC.copy()
-    default_acl[1]['flags'] = INHERIT_FLAGS_ADVANCED.copy()
-
-    expected_flags_0 = INHERIT_FLAGS_BASIC.copy()
-    expected_flags_0['INHERITED'] = True
-    expected_flags_1 = base_flagset.copy()
-    expected_flags_1['INHERITED'] = True
-
-    # get acl of child dataset. This should not change in this test
-    acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True)
-    init_acl = acl_result['acl'][0]['perms']
-
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': default_acl,
-        'uid': 65534,
-        'options': {'recursive': True}
-    }, job=True)
-
-    # Verify that it hasn't changed
-    acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True)
-    fin_acl = acl_result['acl'][0]['perms']
-    assert init_acl == fin_acl, str(acl_result)
-
-    # check on dir 1. Entry 1 should have INHERIT flag added, and
-    # INHERIT_ONLY should be set to False at this depth.
-    acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_DATASET}/dir1', False)
-    theacl = acl_result['acl']
-    assert theacl[0]['flags'] == expected_flags_0, acl_result
-    assert theacl[1]['flags'] == expected_flags_1, acl_result
-
-    # Verify that user was changed on subdirectory
-    assert acl_result['uid'] == 65534, acl_result
-
-    # check on dir 2 - the no propagate inherit flag should have taken
-    # effect and ACL length should be 1
-    acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_DATASET}/dir1/dir2', False)
-    theacl = acl_result['acl']
-    assert theacl[0]['flags'] == expected_flags_0, acl_result
-    assert len(theacl) == 1, acl_result
-
-    # Verify that user was changed two deep
-    assert acl_result['uid'] == 65534, acl_result
-
-
-def test_14_recursive_with_traverse(request):
-    depends(request, ["HAS_NFS4_ACLS"])
-    expected_flags_0 = INHERIT_FLAGS_BASIC.copy()
-    expected_flags_0['INHERITED'] = True
-    expected_flags_1 = base_flagset.copy()
-    expected_flags_1['INHERITED'] = True
-
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': default_acl,
-        'uid': 65534,
-        'options': {'recursive': True, 'traverse': True}
-    }, job=True)
-
-    acl_result = call('filesystem.getacl', f'/mnt/{ACLTEST_SUBDATASET}', True)
-    theacl = acl_result['acl']
-    assert theacl[0]['flags'] == expected_flags_0, acl_result
-    assert theacl[1]['flags'] == expected_flags_1, acl_result
-
-    # Verify that user was changed
-    assert acl_result['uid'] == 65534, acl_result
-
-
-def test_15_strip_acl_from_dataset(request):
-    depends(request, ["HAS_NFS4_ACLS"])
-    call('filesystem.setperm', {
-        'path': TEST_INFO['dataset_path'],
-        'mode': '777',
-        'uid': 65534,
-        'options': {'stripacl': True, 'recursive': True}
-    }, job=True)
-
-    assert call('filesystem.stat', f'/mnt/{ACLTEST_SUBDATASET}')['acl'] is True
-
-    st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}')
-    assert st['acl'] is False, str(st)
-    assert oct(st['mode']) == '0o40777', str(st)
-
-    st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}/dir1')
-    assert st['acl'] is False, str(st)
-    assert oct(st['mode']) == '0o40777', str(st)
-
-    st = call('filesystem.stat', f'/mnt/{ACLTEST_DATASET}/dir1/testfile')
-    assert st['acl'] is False, str(st)
-    assert oct(st['mode']) == '0o100777', str(st)
-
-
-def test_20_delete_child_dataset(request):
-    depends(request, ["HAS_NFS4_ACLS"])
-    call('pool.dataset.delete', ACLTEST_SUBDATASET)
-
-
-@pytest.mark.dependency(name="HAS_TESTFILE")
-def test_22_prep_testfile(request):
-    depends(request, ["HAS_NFS4_ACLS"], scope="session")
-    ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt')
-
-
-"""
-The following tests verify that DENY ACEs are functioning correctly.
-Deny ace will be prepended to base ACL that grants FULL_CONTROL.
-
-#define VREAD_NAMED_ATTRS       000000200000 /* not used */
-#define VWRITE_NAMED_ATTRS      000000400000 /* not used */
-#define VDELETE_CHILD           000001000000
-#define VREAD_ATTRIBUTES        000002000000 /* permission to stat(2) */
-#define VWRITE_ATTRIBUTES       000004000000 /* change {m,c,a}time */
-#define VDELETE                 000010000000
-#define VREAD_ACL               000020000000 /* read ACL and file mode */
-#define VWRITE_ACL              000040000000 /* change ACL and/or file mode */
-#define VWRITE_OWNER            000100000000 /* change file owner */
-#define VSYNCHRONIZE            000200000000 /* not used */
-
-Some tests must be skipped due to lack of implementation in VFS.
-"""
-
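-# Illustration only (hypothetical uid 3000) of the ACL shape built by the deny
-# tests below: a single DENY ACE for the test user is prepended to the base ACL
-# that grants FULL_CONTROL.
-EXAMPLE_DENY_PAYLOAD_ACL = [{
-    "tag": "USER",
-    "id": 3000,
-    "type": "DENY",
-    "perms": {"READ_DATA": True},
-    "flags": {"BASIC": "INHERIT"},
-}] + function_testing_acl_deny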
-
-@pytest.mark.parametrize('perm', IMPLEMENTED_DENY)
-def test_23_test_acl_function_deny(perm, request):
-    """
-    Iterate through available permissions and prepend a
-    deny ACE denying that particular permission to the
-    acltest user, then attempt to perform an action that
-    should result in failure.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-
-    if perm == "FULL_DELETE":
-        to_deny = {"DELETE_CHILD": True, "DELETE": True}
-    else:
-        to_deny = {perm: True}
-
-    payload_acl = [{
-        "tag": "USER",
-        "id": TEST_INFO['user']['uid'],
-        "type": "DENY",
-        "perms": to_deny,
-        "flags": {"BASIC": "INHERIT"}
-    }]
-
-    payload_acl.extend(function_testing_acl_deny)
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 0, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    if perm == "EXECUTE":
-        cmd = f'cd /mnt/{ACLTEST_DATASET}'
-
-    elif perm == "READ_ATTRIBUTES":
-        cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]:
-        cmd = f'rm -f /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_DATA":
-        cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_DATA":
-        cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ATTRIBUTES":
-        cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_ACL":
-        cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ACL":
-        cmd = f'{setfaclcmd} -b /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_OWNER":
-        cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    else:
-        # This should never happen.
-        cmd = "touch /var/empty/ERROR"
-
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    """
-    Per RFC5661 Section 6.2.1.3.2, deletion is permitted if either
-    DELETE_CHILD is permitted on parent, or DELETE is permitted on
-    file. This means that it should succeed when tested in isolation,
-    but fail when combined.
-
-    Unfortunately, this is implemented differently in FreeBSD vs Linux.
-    The former follows the above recommendation; the latter does not, in that
-    denial of DELETE on the file takes precedence over allowing DELETE_CHILD.
-    """
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_deny {to_deny}'
-    expected_delete = ["DELETE_CHILD"]
-    if perm in expected_delete:
-        assert results['result'] is True, errstr
-
-        # unfortunately, we now need to recreate our testfile.
-        ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt')
-    elif perm == "READ_ATTRIBUTES":
-        assert results['result'] is True, errstr
-    else:
-        assert results['result'] is False, errstr
-
-
-@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW)
-def test_24_test_acl_function_allow(perm, request):
-    """
-    Iterate through available permissions and prepend an
-    allow ACE permitting that particular permission to the
-    acltest user, then attempt to perform an action that
-    should result in success.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-
-    """
-    Some extra permission bits must be set for these tests:
-    EXECUTE so that we can traverse to the path in question,
-    and READ_ATTRIBUTES because most of the utilities we use
-    for testing have to stat(2) the files.
-    """
-    to_allow = {perm: True}
-    if perm != "EXECUTE":
-        to_allow["EXECUTE"] = True
-
-    if perm != "READ_ATTRIBUTES":
-        to_allow["READ_ATTRIBUTES"] = True
-
-    if perm == "WRITE_ACL":
-        to_allow["READ_ACL"] = True
-
-    payload_acl = [{
-        "tag": "USER",
-        "id": TEST_INFO['user']['uid'],
-        "type": "ALLOW",
-        "perms": to_allow,
-        "flags": {"BASIC": "INHERIT"}
-    }]
-    payload_acl.extend(function_testing_acl_allow)
-
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 65534, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    if perm == "EXECUTE":
-        cmd = f'cd /mnt/{ACLTEST_DATASET}'
-
-    elif perm == "READ_ATTRIBUTES":
-        cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]:
-        cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_DATA":
-        cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_DATA":
-        cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ATTRIBUTES":
-        cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_ACL":
-        cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ACL":
-        cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_OWNER":
-        cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    else:
-        # This should never happen.
-        cmd = "touch /var/empty/ERROR"
-
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-    assert results['result'] is True, errstr
-    if perm in ["DELETE", "DELETE_CHILD"]:
-        # unfortunately, we now need to recreate our testfile.
-        ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt')
-
-
-@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW)
-def test_25_test_acl_function_omit(perm, request):
-    """
-    Iterate through available permissions and add permissions
-    required for an explicit ALLOW of that ACE from the previous
-    test to succeed. This sets the stage so that success hinges
-    on the presence of the particular permission bit. Then we omit
-    it, which should result in a failure.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-
-    """
-    Some extra permission bits must be set for these tests:
-    EXECUTE so that we can traverse to the path in question,
-    and READ_ATTRIBUTES because most of the utilities we use
-    for testing have to stat(2) the files.
-    """
-    to_allow = {}
-    if perm != "EXECUTE":
-        to_allow["EXECUTE"] = True
-
-    if perm != "READ_ATTRIBUTES":
-        to_allow["READ_ATTRIBUTES"] = True
-
-    if perm == "WRITE_ACL":
-        to_allow["READ_ACL"] = True
-
-    payload_acl = [{
-        "tag": "USER",
-        "id": TEST_INFO['user']['uid'],
-        "type": "ALLOW",
-        "perms": to_allow,
-        "flags": {"BASIC": "INHERIT"}
-    }]
-
-    payload_acl.extend(function_testing_acl_allow)
-
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 65534, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    if perm == "EXECUTE":
-        cmd = f'cd /mnt/{ACLTEST_DATASET}'
-
-    elif perm == "READ_ATTRIBUTES":
-        cmd = f'stat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm in ["DELETE", "DELETE_CHILD", "FULL_DELETE"]:
-        cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_DATA":
-        cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_DATA":
-        cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ATTRIBUTES":
-        cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "READ_ACL":
-        cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_ACL":
-        cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    elif perm == "WRITE_OWNER":
-        cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt'
-
-    else:
-        # This should never happen.
-        cmd = "touch /var/empty/ERROR"
-
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-    assert results['result'] is False, errstr
-
-
-@pytest.mark.parametrize('perm', IMPLEMENTED_ALLOW)
-def test_25_test_acl_function_allow_restrict(perm, request):
-    """
-    Iterate through implemented allow permissions and verify that
-    they grant no more access than intended. Some bits cannot be
-    tested effectively in isolation using built-in utilities.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-
-    """
-    Some extra permission bits must be set for these tests:
-    EXECUTE so that we can traverse to the path in question,
-    and READ_ATTRIBUTES because most of the utilities we use
-    for testing have to stat(2) the files.
-    """
-    to_allow = {}
-    tests_to_skip = []
-    tests_to_skip.append(perm)
-
-    if perm != "EXECUTE":
-        to_allow["EXECUTE"] = True
-        tests_to_skip.append("EXECUTE")
-
-    if perm != "READ_ATTRIBUTES":
-        to_allow["READ_ATTRIBUTES"] = True
-        tests_to_skip.append("READ_ATTRIBUTES")
-
-    if perm == "DELETE_CHILD":
-        tests_to_skip.append("DELETE")
-
-    payload_acl = [{
-        "tag": "USER",
-        "id": TEST_INFO['user']['uid'],
-        "type": "ALLOW",
-        "perms": to_allow,
-        "flags": {"BASIC": "INHERIT"}
-    }]
-    payload_acl.extend(function_testing_acl_allow)
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 65534, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    if "EXECUTE" not in tests_to_skip:
-        cmd = f'cd /mnt/{ACLTEST_DATASET}'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "DELETE" not in tests_to_skip:
-        cmd = f'rm /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        if results['result'] is True:
-            # The delete unexpectedly succeeded; re-create the test file before
-            # failing so later checks still have it. The kernel ACL inheritance
-            # routine will ensure that the new file has the right ACL.
-            ssh(f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt')
-        assert results['result'] is False, errstr
-
-    if "READ_DATA" not in tests_to_skip:
-        cmd = f'cat /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "WRITE_DATA" not in tests_to_skip:
-        cmd = f'echo -n "CAT" >> /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "WRITE_ATTRIBUTES" not in tests_to_skip:
-        cmd = f'touch -a -m -t 201512180130.09 /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "READ_ACL" not in tests_to_skip:
-        cmd = f'{getfaclcmd} /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "WRITE_ACL" not in tests_to_skip:
-        cmd = f'{setfaclcmd} -x 0 /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-    if "WRITE_OWNER" not in tests_to_skip:
-        cmd = f'chown {ACL_USER} /mnt/{ACLTEST_DATASET}/acltest.txt'
-        results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-        errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {to_allow}'
-        assert results['result'] is False, errstr
-
-
-def test_26_file_execute_deny(request):
-    """
-    Base permset with everyone@ FULL_CONTROL, but an ACE added on
-    top explicitly denying EXECUTE. Attempting to execute the file should fail.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-    payload_acl = [
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "DENY",
-            "perms": {"EXECUTE": True},
-            "flags": {"FILE_INHERIT": True}
-        },
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "ALLOW",
-            "perms": {"EXECUTE": True},
-            "flags": {"BASIC": "NOINHERIT"}
-        },
-    ]
-    payload_acl.extend(function_testing_acl_deny)
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 0, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt')
-
-    cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt'
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}'
-    assert results['result'] is False, errstr
-
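-
-# Hedged note on the ordering above: NFSv4 ACLs are evaluated in order and the
-# first matching ACE decides access, so placing the DENY for EXECUTE ahead of
-# the ALLOW is what makes execution fail here. Illustrative shape only:
-#
-#   dacl = [
-#       {'tag': 'USER', 'id': uid, 'type': 'DENY',  'perms': {'EXECUTE': True}},
-#       {'tag': 'USER', 'id': uid, 'type': 'ALLOW', 'perms': {'EXECUTE': True}},
-#   ]  # DENY wins for EXECUTE because it is hit first
-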
-
-def test_27_file_execute_allow(request):
-    """
-    Verify that setting EXECUTE allows file execution. READ_DATA and
-    READ_ATTRIBUTES are also granted because we need to be able to
-    stat and read our test script.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-    payload_acl = [
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "ALLOW",
-            "perms": {
-                "EXECUTE": True,
-                "READ_DATA": True,
-                "READ_ATTRIBUTES": True
-            },
-            "flags": {"FILE_INHERIT": True}
-        },
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "ALLOW",
-            "perms": {"EXECUTE": True},
-            "flags": {"BASIC": "NOINHERIT"}
-        },
-    ]
-    payload_acl.extend(function_testing_acl_allow)
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 0, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt')
-
-    cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt'
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}'
-    assert results['result'] is True, errstr
-
-
-def test_28_file_execute_omit(request):
-    """
-    Grant user all permissions except EXECUTE. Attempt to execute
-    file should fail.
-    """
-    depends(request, ["HAS_NFS4_ACLS", "HAS_TESTFILE"], scope="session")
-    payload_acl = [
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "ALLOW",
-            "perms": base_permset.copy(),
-            "flags": {"FILE_INHERIT": True}
-        },
-        {
-            "tag": "USER",
-            "id": TEST_INFO['user']['uid'],
-            "type": "ALLOW",
-            "perms": {"EXECUTE": True},
-            "flags": {"BASIC": "NOINHERIT"}
-        },
-    ]
-    payload_acl.extend(function_testing_acl_allow)
-    # at this point the user's ACE has all perms set
-    # remove execute.
-    payload_acl[0]['perms']['EXECUTE'] = False
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 0, 'uid': 0,
-        'options': {'recursive': True},
-    }, job=True)
-
-    ssh(f'echo "echo CANARY" > /mnt/{ACLTEST_DATASET}/acltest.txt')
-
-    cmd = f'/mnt/{ACLTEST_DATASET}/acltest.txt'
-    results = SSH_TEST(cmd, ACL_USER, ACL_PWD)
-    errstr = f'cmd: {cmd}, res: {results["output"]}, to_allow {payload_acl}'
-    assert results['result'] is False, errstr
-
-
-def test_29_owner_restrictions(request):
-    depends(request, ["HAS_NFS4_ACLS"], scope="session")
-
-    payload_acl = [{
-        "tag": "owner@",
-        "id": -1,
-        "type": "ALLOW",
-        "perms": {"BASIC": "READ"},
-        "flags": {"BASIC": "INHERIT"}
-    }]
-    call('filesystem.setacl', {
-        'path': TEST_INFO['dataset_path'],
-        'dacl': payload_acl,
-        'gid': 0, 'uid': TEST_INFO['user']['uid'],
-        'options': {'recursive': True},
-    }, job=True)
-
-    results = ssh(
-        f'mkdir /mnt/{ACLTEST_DATASET}/dir1/dir_should_not_exist',
-        complete_response=True, check=False,
-        user=ACL_USER, password=ACL_PWD
-    )
-
-    assert results['result'] is False, str(results)
-
-    results = ssh(
-        f'touch /mnt/{ACLTEST_DATASET}/dir1/file_should_not_exist',
-        complete_response=True, check=False,
-        user=ACL_USER, password=ACL_PWD
-    )
-
-    assert results['result'] is False, str(results)
-
-
-def test_30_acl_inherit_nested_dataset():
-    with make_dataset("acl_test_inherit1", data={'share_type': 'SMB'}) as ds1:
-        call('filesystem.add_to_acl', {
-            'path': os.path.join('/mnt', ds1),
-            'entries': [{'id_type': 'GROUP', 'id': 666, 'access': 'READ'}]
-        }, job=True)
-
-        acl1 = call('filesystem.getacl', os.path.join('/mnt', ds1))
-        assert any(x['id'] == 666 for x in acl1['acl'])
-
-        with pytest.raises(ValidationErrors):
-            # ACL on parent dataset prevents adding APPS group to ACL. Fail.
-            with make_dataset("acl_test_inherit1/acl_test_inherit2", data={'share_type': 'APPS'}):
-                pass
-
-        with make_dataset("acl_test_inherit1/acl_test_inherit2", data={'share_type': 'NFS'}) as ds2:
-            acl2 = call('filesystem.getacl', os.path.join('/mnt', ds2))
-            assert acl1['acltype'] == acl2['acltype']
-            assert any(x['id'] == 666 for x in acl2['acl'])
diff --git a/tests/api2/test_347_posix_mode.py b/tests/api2/test_347_posix_mode.py
deleted file mode 100644
index dc46df4e69e29..0000000000000
--- a/tests/api2/test_347_posix_mode.py
+++ /dev/null
@@ -1,459 +0,0 @@
-#!/usr/bin/env python3
-
-# License: BSD
-
-import os
-import pytest
-import stat
-
-from functions import SSH_TEST
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from time import sleep
-
-
-MODE_DATASET_NAME = 'modetest'
-MODE_SUBDATASET_NAME = 'modetest/sub1'
-
-OWNER_BITS = {
-    "OWNER_READ": stat.S_IRUSR,
-    "OWNER_WRITE": stat.S_IWUSR,
-    "OWNER_EXECUTE": stat.S_IXUSR,
-}
-
-GROUP_BITS = {
-    "GROUP_READ": stat.S_IRGRP,
-    "GROUP_WRITE": stat.S_IWGRP,
-    "GROUP_EXECUTE": stat.S_IXGRP,
-}
-
-OTHER_BITS = {
-    "OTHER_READ": stat.S_IROTH,
-    "OTHER_WRITE": stat.S_IWOTH,
-    "OTHER_EXECUTE": stat.S_IXOTH,
-}
-
-MODE = {**OWNER_BITS, **GROUP_BITS, **OTHER_BITS}
-
-MODE_USER = "modetesting"
-MODE_GROUP = "modetestgrp"
-MODE_PWD = "modetesting"
-
-
-@pytest.fixture(scope='module')
-def get_dataset():
-    with dataset(MODE_DATASET_NAME) as ds:
-        path = os.path.join('/mnt', ds)
-        ssh(f'mkdir -p {path}/dir1/dir2')
-        ssh(f'touch {path}/dir1/dir2/testfile')
-
-        with dataset(MODE_SUBDATASET_NAME):
-            yield ds
-
-
-@pytest.fixture(scope='module')
-def get_user():
-    with group({"name": MODE_GROUP}) as g:
-        with user({
-            'username': MODE_USER,
-            'full_name': MODE_USER,
-            'password': MODE_PWD,
-            'group_create': True,
-            'shell': '/usr/bin/bash',
-            'ssh_password_enabled': True,
-            'groups': [g['id']]
-        }) as u:
-            yield u | {'group_gid': g['gid']}
-
-
-@pytest.fixture(scope='function')
-def setup_file(get_dataset):
-    ds_path = os.path.join('/mnt', get_dataset)
-    try:
-        ssh(f'echo "echo CANARY" > {ds_path}/canary')
-        yield
-    finally:
-        ssh(f'rm {ds_path}/canary', check=False)
-
-
-def get_mode_octal(path):
-    mode = call('filesystem.stat', path)['mode']
-    return f"{stat.S_IMODE(mode):03o}"
-
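-
-# Hedged illustration of the helper above: filesystem.stat returns the full
-# st_mode including the file-type bits, so a directory with 0o755 permissions
-# reports 0o40755; stat.S_IMODE() strips the type bits before formatting.
-#
-#   >>> f"{stat.S_IMODE(0o40755):03o}"
-#   '755'
-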
-
-@pytest.mark.dependency(name="IS_TRIVIAL")
-def test_verify_acl_is_trivial(get_dataset):
-    st = call('filesystem.stat', os.path.join('/mnt', get_dataset))
-    assert st['acl'] is False
-
-
-@pytest.mark.parametrize('mode_bit', MODE.keys())
-def test_verify_setting_mode_bits_nonrecursive(get_dataset, mode_bit):
-    """
-    This test iterates through possible POSIX permissions bits and
-    verifies that they are properly set on the remote server.
-    """
-    new_mode = f"{MODE[mode_bit]:03o}"
-    path = os.path.join('/mnt', get_dataset)
-
-    call('filesystem.setperm', {
-        'path': path,
-        'mode': new_mode,
-        'uid': 65534,
-        'gid': 65534
-    }, job=True)
-
-    server_mode = get_mode_octal(path)
-    assert new_mode == server_mode
-
-
-@pytest.mark.parametrize('mode_bit', MODE.keys())
-def test_verify_setting_mode_bits_recursive_no_traverse(get_dataset, mode_bit):
-    """
-    Perform recursive permissions change and verify new mode written
-    to files and subdirectories.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    sub_ds_path = os.path.join(ds_path, 'sub1')
-
-    new_mode = f"{MODE[mode_bit]:03o}"
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': new_mode,
-        'uid': 65534,
-        'gid': 65534,
-        'options': {'recursive': True}
-    }, job=True)
-
-    server_mode = get_mode_octal(ds_path)
-    assert new_mode == server_mode
-
-    server_mode = get_mode_octal(os.path.join(ds_path, 'dir1', 'dir2'))
-    assert new_mode == server_mode
-
-    server_mode = get_mode_octal(os.path.join(ds_path, 'dir1', 'dir2', 'testfile'))
-    assert new_mode == server_mode
-
-    # child dataset shouldn't be touched
-    server_mode = get_mode_octal(sub_ds_path)
-    assert server_mode == "755"
-
-
-def test_verify_traverse_to_child_dataset(get_dataset):
-    ds_path = os.path.join('/mnt', get_dataset)
-    sub_ds_path = os.path.join(ds_path, 'sub1')
-
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': '777',
-        'uid': 65534,
-        'gid': 65534,
-        'options': {'recursive': True, 'traverse': True}
-    }, job=True)
-
-    server_mode = get_mode_octal(sub_ds_path)
-    assert server_mode == "777"
-
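-
-# Taken together, the two tests above show that 'recursive' alone stops at
-# child dataset boundaries (sub1 stayed at 755) and that adding
-# 'traverse': True is what crosses into the child dataset. Hedged sketch of
-# the two option shapes (same filesystem.setperm call as used above):
-#
-#   {'options': {'recursive': True}}                    # stays within the dataset
-#   {'options': {'recursive': True, 'traverse': True}}  # also descends into children
-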
-
-def dir_mode_check(mode_bit, MODE_DATASET):
-    if mode_bit.endswith("READ"):
-        cmd = f'ls /mnt/{MODE_DATASET}'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is True, results['output']
-
-        cmd = f'touch /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        cmd = f'cd /mnt/{MODE_DATASET}'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-    elif mode_bit.endswith("WRITE"):
-        cmd = f'ls /mnt/{MODE_DATASET}'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        # Ensure that file is deleted before trying to create
-        ssh(f'rm /mnt/{MODE_DATASET}/canary', check=False)
-
-        cmd = f'touch /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is True, results['output']
-
-        cmd = f'rm /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is True, results['output']
-
-    elif mode_bit.endswith("EXECUTE"):
-        cmd = f'ls /mnt/{MODE_DATASET}'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        # Ensure that file is deleted before trying to create
-        ssh(f'rm /mnt/{MODE_DATASET}/canary', check=False)
-
-        cmd = f'touch /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-
-def file_mode_check(mode_bit, MODE_DATASET):
-    if mode_bit.endswith("READ"):
-        cmd = f'cat /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is True, results['output']
-        assert results['stdout'].strip() == "echo CANARY", results['output']
-
-        cmd = f'echo "FAIL" >> /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        cmd = f'/mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-    elif mode_bit.endswith("WRITE"):
-        cmd = f'cat /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is True, results['output']
-
-        cmd = f'/mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        """
-        Parent directory does not have write bit set. This
-        means rm should fail even though WRITE is set for user.
-        """
-        cmd = f'rm /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        ssh(f'echo "echo CANARY" > /mnt/{MODE_DATASET}/canary')
-
-    elif mode_bit.endswith("EXECUTE"):
-        cmd = f'cat /mnt/{MODE_DATASET}'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-        cmd = f'echo "FAIL" > /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-
-def file_mode_check_xor(mode_bit, MODE_DATASET):
-    """
-    When this method is called, all permission bits are set except for
-    the one being tested.
-    """
-    if mode_bit.endswith("READ"):
-        cmd = f'cat /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-    elif mode_bit.endswith("WRITE"):
-        cmd = f'echo "SUCCESS" > /mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
-    elif mode_bit.endswith("EXECUTE"):
-        cmd = f'/mnt/{MODE_DATASET}/canary'
-        results = SSH_TEST(cmd, MODE_USER, MODE_PWD)
-        assert results['result'] is False, results['output']
-
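-
-# Quick sanity sketch of the "all bits except one" computation performed by the
-# *_xor tests near the end of this file, which feed the helper above (values
-# straight from the stat module, shown for illustration only):
-#
-#   full = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO   # 0o777
-#   full ^ stat.S_IRUSR   # 0o377 -> owner read removed
-#   full ^ stat.S_IWGRP   # 0o757 -> group write removed
-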
-
-@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
-def test_directory_owner_bits_function_allow(mode_bit, get_dataset, get_user):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    In the case of a directory, EXECUTE must be set together with WRITE
-    in order to verify correct write behavior.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = MODE[mode_bit]
-    if new_mode == stat.S_IWUSR:
-        new_mode |= stat.S_IXUSR
-
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': f'{new_mode:03o}',
-        'uid': get_user['uid'],
-        'gid': 65534,
-    }, job=True)
-
-    dir_mode_check(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
-def test_directory_group_bits_function_allow(mode_bit, get_dataset, get_user):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    In the case of a directory, EXECUTE must be set together with WRITE
-    in order to verify correct write behavior.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-
-    new_mode = MODE[mode_bit]
-    if new_mode == stat.S_IWGRP:
-        new_mode |= stat.S_IXGRP
-
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': f'{new_mode:03o}',
-        'uid': 0,
-        'gid': get_user['group_gid'],
-    }, job=True)
-
-    dir_mode_check(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
-def test_directory_other_bits_function_allow(mode_bit, get_dataset, setup_file):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    In the case of a directory, EXECUTE must be set together with WRITE
-    in order to verify correct write behavior.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-
-    new_mode = MODE[mode_bit]
-    if new_mode == stat.S_IWOTH:
-        new_mode |= stat.S_IXOTH
-
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': f'{new_mode:03o}',
-        'uid': 0,
-        'gid': 0,
-    }, job=True)
-
-    sleep(5)
-    dir_mode_check(mode_bit, get_dataset)
-
-
-def test_setup_dataset_perm(get_dataset):
-    """ Allow execute permission on dataset mountpoint to facilitate file testing """
-    ds_path = os.path.join('/mnt', get_dataset)
-    call('filesystem.setperm', {
-        'path': ds_path,
-        'mode': '001',
-        'uid': 0,
-        'gid': 0,
-    }, job=True)
-
-
-@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
-def test_file_owner_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'uid': get_user['uid'],
-        'gid': 0,
-    }, job=True)
-
-    file_mode_check(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
-def test_file_group_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'gid': get_user['group_gid'],
-        'uid': 0,
-    }, job=True)
-
-    file_mode_check(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
-def test_file_other_bits_function_allow(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when it is the only bit set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'gid': 0,
-        'uid': 0,
-    }, job=True)
-
-    file_mode_check(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', OWNER_BITS.keys())
-def test_file_owner_bits_xor(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when every bit except the one being tested is set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
-    new_mode = new_mode ^ MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'gid': 0,
-        'uid': get_user['uid'],
-    }, job=True)
-
-    file_mode_check_xor(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', GROUP_BITS.keys())
-def test_file_group_bits_xor(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when every bit except the one being tested is set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
-    new_mode = new_mode ^ MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'gid': get_user['group_gid'],
-        'uid': 0,
-    }, job=True)
-
-    file_mode_check_xor(mode_bit, get_dataset)
-
-
-@pytest.mark.parametrize('mode_bit', OTHER_BITS.keys())
-def test_file_other_bits_xor(mode_bit, get_dataset, get_user, setup_file):
-    """
-    Verify mode behavior is correct when every bit except the one being tested is set.
-    """
-    ds_path = os.path.join('/mnt', get_dataset)
-    new_mode = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
-    new_mode = new_mode ^ MODE[mode_bit]
-
-    call('filesystem.setperm', {
-        'path': os.path.join(ds_path, 'canary'),
-        'mode': f'{new_mode:03o}',
-        'gid': 0,
-        'uid': 0,
-    }, job=True)
-
-    file_mode_check_xor(mode_bit, get_dataset)
diff --git a/tests/api2/test_420_smb.py b/tests/api2/test_420_smb.py
deleted file mode 100644
index 6bf86c15f2cd9..0000000000000
--- a/tests/api2/test_420_smb.py
+++ /dev/null
@@ -1,449 +0,0 @@
-import pytest
-import sys
-import os
-import secrets
-import string
-import uuid
-from time import sleep
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from protocols import smb_connection
-from utils import create_dataset
-from auto_config import pool_name
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.assets.pool import dataset as make_dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.system import reset_systemd_svcs
-
-
-AUDIT_WAIT = 10
-SMB_NAME = "TestCifsSMB"
-SHAREUSER = 'smbuser420'
-PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-
-@pytest.fixture(scope='module')
-def smb_info():
-    with make_dataset('smb-cifs', data={'share_type': 'SMB'}) as ds:
-        with user({
-            'username': SHAREUSER,
-            'full_name': SHAREUSER,
-            'group_create': True,
-            'password': PASSWD
-        }, get_instance=False):
-            with smb_share(os.path.join('/mnt', ds), SMB_NAME, {
-                'purpose': 'NO_PRESET',
-            }) as s:
-                try:
-                    call('smb.update', {
-                        'guest': SHAREUSER
-                    })
-                    call('service.update', 'cifs', {'enable': True})
-                    call('service.start', 'cifs')
-                    yield {'dataset': ds, 'share': s}
-                finally:
-                    call('smb.update', {
-                        'guest': 'nobody'
-                    })
-                    call('service.stop', 'cifs')
-                    call('service.update', 'cifs', {'enable': False})
-
-
-@pytest.fixture(scope='function')
-def enable_guest(smb_info):
-    smb_id = smb_info['share']['id']
-    call('sharing.smb.update', smb_id, {'guestok': True})
-    try:
-        yield
-    finally:
-        call('sharing.smb.update', smb_id, {'guestok': False})
-
-
-@pytest.fixture(scope='function')
-def enable_aapl():
-    reset_systemd_svcs('smbd')
-    call('smb.update', {'aapl_extensions': True})
-
-    try:
-        yield
-    finally:
-        call('smb.update', {'aapl_extensions': False})
-
-
-@pytest.fixture(scope='function')
-def enable_smb1():
-    reset_systemd_svcs('smbd')
-    call('smb.update', {'enable_smb1': True})
-
-    try:
-        yield
-    finally:
-        call('smb.update', {'enable_smb1': False})
-
-
-@pytest.fixture(scope='function')
-def enable_recycle_bin(smb_info):
-    smb_id = smb_info['share']['id']
-    call('sharing.smb.update', smb_id, {'recyclebin': True})
-
-    try:
-        yield
-    finally:
-        call('sharing.smb.update', smb_id, {'recyclebin': False})
-
-
-@pytest.mark.parametrize('proto,runas', [
-    ('SMB1', 'GUEST'),
-    ('SMB2', 'GUEST'),
-    ('SMB1', SHAREUSER),
-    ('SMB2', SHAREUSER)
-])
-def test__basic_smb_ops(enable_smb1, enable_guest, proto, runas):
-    with smb_connection(
-        share=SMB_NAME,
-        username=runas,
-        password=PASSWD,
-        smb1=(proto == 'SMB1')
-    ) as c:
-        filename1 = f'testfile1_{proto.lower()}_{runas}.txt'
-        filename2 = f'testfile2_{proto.lower()}_{runas}.txt'
-        dirname = f'testdir_{proto.lower()}_{runas}.txt'
-
-        fd = c.create_file(filename1, 'w')
-        c.write(fd, b'foo')
-        val = c.read(fd, 0, 3)
-        c.close(fd, True)
-        assert val == b'foo'
-
-        c.mkdir(dirname)
-        fd = c.create_file(f'{dirname}/{filename2}', 'w')
-        c.write(fd, b'foo2')
-        val = c.read(fd, 0, 4)
-        c.close(fd, True)
-        assert val == b'foo2'
-
-        c.rmdir(dirname)
-
-        # DELETE_ON_CLOSE flag was set prior to closing files
-        # and so root directory should be empty
-        assert c.ls('/') == []
-
-
-def test__change_sharing_smb_home_to_true(smb_info):
-    reset_systemd_svcs('smbd')
-    smb_id = smb_info['share']['id']
-    share = call('sharing.smb.update', smb_id, {'home': True})
-    try:
-        share_path = call('smb.getparm', 'path', 'homes')
-        assert share_path == f'{share["path_local"]}/%U'
-    finally:
-        new_info = call('sharing.smb.update', smb_id, {'home': False})
-
-    share_path = call('smb.getparm', 'path', new_info['name'])
-    assert share_path == share['path_local']
-    obey_pam_restrictions = call('smb.getparm', 'obey pam restrictions', 'GLOBAL')
-    assert obey_pam_restrictions is False
-
-
-def test__change_timemachine_to_true(enable_aapl, smb_info):
-    smb_id = smb_info['share']['id']
-    call('sharing.smb.update', smb_id, {'timemachine': True})
-    try:
-        share_info = call('sharing.smb.query', [['id', '=', smb_id]], {'get': True})
-        assert share_info['timemachine'] is True
-
-        enabled = call('smb.getparm', 'fruit:time machine', share_info['name'])
-        assert enabled == 'True'
-
-        vfs_obj = call('smb.getparm', 'vfs objects', share_info['name'])
-        assert 'fruit' in vfs_obj
-    finally:
-        call('sharing.smb.update', smb_id, {'timemachine': False})
-
-
-def do_recycle_ops(c, has_subds=False):
-    # Our recycle repository should be auto-created on connect.
-    fd = c.create_file('testfile.txt', 'w')
-    c.write(fd, b'foo')
-    c.close(fd, True)
-
-    # The above close op also deleted the file, so we
-    # expect the file to now exist in the user's .recycle directory
-    fd = c.create_file(f'.recycle/{SHAREUSER}/testfile.txt', 'r')
-    val = c.read(fd, 0, 3)
-    c.close(fd)
-    assert val == b'foo'
-
-    # re-open so that we can set DELETE_ON_CLOSE
-    # this verifies that SMB client can purge file from recycle bin
-    c.close(c.create_file(f'.recycle/{SHAREUSER}/testfile.txt', 'w'), True)
-    assert c.ls(f'.recycle/{SHAREUSER}/') == []
-
-    if not has_subds:
-        return
-
-    # nested datasets get their own recycle bin to preserve atomicity of
-    # rename op.
-    fd = c.create_file('subds/testfile2.txt', 'w')
-    c.write(fd, b'boo')
-    c.close(fd, True)
-
-    fd = c.create_file(f'subds/.recycle/{SHAREUSER}/testfile2.txt', 'r')
-    val = c.read(fd, 0, 3)
-    c.close(fd)
-    assert val == b'boo'
-
-    c.close(c.create_file(f'subds/.recycle/{SHAREUSER}/testfile2.txt', 'w'), True)
-    assert c.ls(f'subds/.recycle/{SHAREUSER}/') == []
-
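-
-# Hedged sketch of the layout exercised above: with recyclebin enabled, a
-# delete over SMB becomes a rename into a per-user directory beneath the
-# share, and each nested dataset keeps its own bin so the rename stays atomic:
-#
-#   <share>/.recycle/<username>/testfile.txt
-#   <share>/subds/.recycle/<username>/testfile2.txt
-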
-
-def test__recyclebin_functional_test(enable_recycle_bin, smb_info):
-    with create_dataset(f'{smb_info["dataset"]}/subds', {'share_type': 'SMB'}):
-        with smb_connection(
-            share=SMB_NAME,
-            username=SHAREUSER,
-            password=PASSWD,
-        ) as c:
-            do_recycle_ops(c, True)
-
-
-@pytest.mark.parametrize('smb_config', [
-    {'global': {'aapl_extensions': True}, 'share': {'aapl_name_mangling': True}},
-    {'global': {'aapl_extensions': True}, 'share': {'aapl_name_mangling': False}},
-    {'global': {'aapl_extensions': False}, 'share': {}},
-])
-def test__recyclebin_functional_test_subdir(smb_info, smb_config):
-    tmp_ds = f"{pool_name}/recycle_test"
-    tmp_ds_path = f'/mnt/{tmp_ds}/subdir'
-    tmp_share_name = 'recycle_test'
-
-    reset_systemd_svcs('smbd')
-    call('smb.update', smb_config['global'])
-    # basic tests of recyclebin operations
-    with create_dataset(tmp_ds, {'share_type': 'SMB'}):
-        ssh(f'mkdir {tmp_ds_path}')
-        with smb_share(tmp_ds_path, tmp_share_name, {
-            'purpose': 'NO_PRESET',
-            'recyclebin': True
-        } | smb_config['share']):
-            with smb_connection(
-                share=tmp_share_name,
-                username=SHAREUSER,
-                password=PASSWD,
-            ) as c:
-                do_recycle_ops(c)
-
-    # A harsher test where the first operation on the tree connect is opening
-    # a file in a subdirectory in order to delete it
-    with create_dataset(tmp_ds, {'share_type': 'SMB'}):
-        ops = [
-            f'mkdir {tmp_ds_path}',
-            f'mkdir {tmp_ds_path}/subdir',
-            f'touch {tmp_ds_path}/subdir/testfile',
-            f'chown {SHAREUSER} {tmp_ds_path}/subdir/testfile',
-        ]
-        ssh(';'.join(ops))
-        with smb_share(tmp_ds_path, tmp_share_name, {
-            'purpose': 'NO_PRESET',
-            'recyclebin': True
-        } | smb_config['share']):
-            with smb_connection(
-                share=tmp_share_name,
-                username=SHAREUSER,
-                password=PASSWD,
-            ) as c:
-                fd = c.create_file('subdir/testfile', 'w')
-                c.write(fd, b'boo')
-                c.close(fd, True)
-
-                fd = c.create_file(f'.recycle/{SHAREUSER}/subdir/testfile', 'r')
-                val = c.read(fd, 0, 3)
-                c.close(fd)
-                assert val == b'boo'
-
-
-def test__netbios_name_change_check_sid():
-    """ changing netbiosname should not alter our local sid value """
-    orig = call('smb.config')
-    new_sid = call('smb.update', {'netbiosname': 'nb_new'})['cifs_SID']
-
-    try:
-        assert new_sid == orig['cifs_SID']
-        localsid = call('smb.groupmap_list')['localsid']
-        assert new_sid == localsid
-    finally:
-        call('smb.update', {'netbiosname': orig['netbiosname']})
-
-
-AUDIT_FIELDS = [
-    'audit_id', 'timestamp', 'address', 'username', 'session', 'service',
-    'service_data', 'event', 'event_data', 'success'
-]
-
-
-def validate_vers(vers, expected_major, expected_minor):
-    assert 'major' in vers, str(vers)
-    assert 'minor' in vers, str(vers)
-    assert vers['major'] == expected_major
-    assert vers['minor'] == expected_minor
-
-
-def validate_svc_data(msg, svc):
-    assert 'service_data' in msg, str(msg)
-    svc_data = msg['service_data']
-    for key in ['vers', 'service', 'session_id', 'tcon_id']:
-        assert key in svc_data, str(svc_data)
-
-    assert svc_data['service'] == svc
-
-    assert isinstance(svc_data['session_id'], str)
-    assert svc_data['session_id'].isdigit()
-
-    assert isinstance(svc_data['tcon_id'], str)
-    assert svc_data['tcon_id'].isdigit()
-
-
-def validate_event_data(event_data, schema):
-    event_data_keys = set(event_data.keys())
-    schema_keys = set(schema['_attrs_order_'])
-    assert event_data_keys == schema_keys
-
-
-def validate_audit_op(msg, svc):
-    schema = call(
-        'audit.json_schemas',
-        [['_name_', '=', f'audit_entry_smb_{msg["event"].lower()}']],
-        {
-            'select': [
-                ['_attrs_order_', 'attrs'],
-                ['properties.event_data', 'event_data']
-            ],
-        }
-    )
-
-    assert schema != [], str(msg)
-    schema = schema[0]
-
-    for key in schema['attrs']:
-        assert key in msg, str(msg)
-
-    validate_svc_data(msg, svc)
-    try:
-        aid_guid = uuid.UUID(msg['audit_id'])
-    except ValueError:
-        raise AssertionError(f'{msg["audit_id"]}: malformed UUID')
-
-    assert str(aid_guid) == msg['audit_id']
-
-    try:
-        sess_guid = uuid.UUID(msg['session'])
-    except ValueError:
-        raise AssertionError(f'{msg["session"]}: malformed UUID')
-
-    assert str(sess_guid) == msg['session']
-
-    validate_event_data(msg['event_data'], schema['event_data'])
-
-
-def do_audit_ops(svc):
-    with smb_connection(
-        share=svc,
-        username=SHAREUSER,
-        password=PASSWD,
-    ) as c:
-        fd = c.create_file('testfile.txt', 'w')
-        for i in range(0, 3):
-            c.write(fd, b'foo')
-            c.read(fd, 0, 3)
-        c.close(fd, True)
-
-    sleep(AUDIT_WAIT)
-    return call('auditbackend.query', 'SMB', [['event', '!=', 'AUTHENTICATION']])
-
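-
-# Hedged sketch of what one do_audit_ops() pass is expected to yield (mirrors
-# the assertions in test__audit_log below; exact counts vary by client):
-#
-#   expected = {'CONNECT', 'CREATE', 'WRITE', 'READ', 'CLOSE', 'DISCONNECT'}
-#   assert expected <= {ev['event'] for ev in do_audit_ops(share_name)}
-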
-
-def test__audit_log(request):
-    def get_event(event_list, ev_type):
-        for e in event_list:
-            if e['event'] == ev_type:
-                return e
-
-        return None
-
-    with make_dataset('smb-audit', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), 'SMB_AUDIT', {
-            'purpose': 'NO_PRESET',
-            'guestok': True,
-            'audit': {'enable': True}
-        }) as s:
-            events = do_audit_ops(s['name'])
-            assert len(events) > 0
-
-            for ev_type in ['CONNECT', 'DISCONNECT', 'CREATE', 'CLOSE', 'READ', 'WRITE']:
-                assert get_event(events, ev_type) is not None, str(events)
-
-            for event in events:
-                validate_audit_op(event, s['name'])
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'ignore_list': ['builtin_users']}})
-            assert new_data['audit']['enable'], str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that being a member of a group in the ignore list is sufficient to avoid new messages
-            # By default authentication attempts are always logged
-            assert do_audit_ops(s['name']) == events
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'watch_list': ['builtin_users']}})
-            assert new_data['audit']['enable'], str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-            assert new_data['audit']['watch_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that watch_list takes precedence
-            # By default authentication attempts are always logged
-            new_events = do_audit_ops(s['name'])
-            assert len(new_events) > len(events)
-
-            new_data = call('sharing.smb.update', s['id'], {'audit': {'enable': False}})
-            assert new_data['audit']['enable'] is False, str(new_data['audit'])
-            assert new_data['audit']['ignore_list'] == ['builtin_users'], str(new_data['audit'])
-            assert new_data['audit']['watch_list'] == ['builtin_users'], str(new_data['audit'])
-
-            # Verify that disabling audit prevents new messages from being written
-            assert do_audit_ops(s['name']) == new_events
-
-
-@pytest.mark.parametrize('torture_test', [
-    'local.binding',
-    'local.ntlmssp',
-    'local.smbencrypt',
-    'local.messaging',
-    'local.irpc',
-    'local.strlist',
-    'local.file',
-    'local.str',
-    'local.time',
-    'local.datablob',
-    'local.binsearch',
-    'local.asn1',
-    'local.anonymous_shared',
-    'local.strv',
-    'local.strv_util',
-    'local.util',
-    'local.idtree',
-    'local.dlinklist',
-    'local.genrand',
-    'local.iconv',
-    'local.socket',
-    'local.pac',
-    'local.share',
-    'local.loadparm',
-    'local.charset',
-    'local.convert_string',
-    'local.string_case_handle',
-    'local.tevent_req',
-    'local.util_str_escape',
-    'local.talloc',
-    'local.replace',
-    'local.crypto.md4'
-])
-def test__local_torture(request, torture_test):
-    ssh(f'smbtorture //127.0.0.1 {torture_test}')
diff --git a/tests/api2/test_425_smb_protocol.py b/tests/api2/test_425_smb_protocol.py
deleted file mode 100644
index a2e5f38a5efcc..0000000000000
--- a/tests/api2/test_425_smb_protocol.py
+++ /dev/null
@@ -1,812 +0,0 @@
-import os
-import enum
-import secrets
-import string
-from base64 import b64decode, b64encode
-
-import pytest
-from pytest_dependency import depends
-
-from functions import SSH_TEST
-from auto_config import user, password
-from middlewared.test.integration.assets.account import user as create_user
-from middlewared.test.integration.assets.smb import copy_stream, get_stream, smb_share, smb_mount
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-# DO NOT change the order of these imports
-# or the ntstatus import will fail
-from protocols import SMB, smb_connection
-from samba import ntstatus
-from samba import NTSTATUSError
-
-
-SMB_NAME = "SMBPROTO"
-SMB_USER = "smbuser"
-SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-TEST_DATA = {}
-
-
-class DOSmode(enum.Enum):
-    READONLY = 1
-    HIDDEN = 2
-    SYSTEM = 4
-    ARCHIVE = 32
-
-
-netatalk_metadata = """
-AAUWBwACAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAEAAAAmgAAAAAAAAAIAAABYgAAABAAAAAJAAAA
-egAAACAAAAAOAAABcgAAAASAREVWAAABdgAAAAiASU5PAAABfgAAAAiAU1lOAAABhgAAAAiAU1Z+
-AAABjgAAAARQTEFQbHRhcAQQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAAAAAAAAAAAAAAIbmGsyG5hrOAAAAAKEvSOAAAAAAAAAAAAAAAAAcBAAAAAAAA9xS5YAAAAAAZ
-AAAA
-"""
-
-parsed_meta = """
-QUZQAAAAAQAAAAAAgAAAAFBMQVBsdGFwBBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
-AAAA
-"""
-
-apple_kmdlabel = """
-8oBNzAaTG04NeBVAT078KCEjrzPrwPTUuZ4MXK1qVRDlBqLATmFSDFO2hXrS5VWsrg1DoZqeX6kF
-zDEInIzw2XrZkI9lY3jvMAGXu76QvwrpRGv1G3Ehj+0=
-"""
-
-apple_kmditemusertags = """
-YnBsaXN0MDCgCAAAAAAAAAEBAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAJ
-"""
-
-AFPXattr = {
-    "org.netatalk.Metadata": {
-        "smbname": "AFP_AfpInfo",
-        "text": netatalk_metadata,
-        "bytes": b64decode(netatalk_metadata),
-        "smb_text": parsed_meta,
-        "smb_bytes": b64decode(parsed_meta)
-    },
-    "com.apple.metadata:_kMDItemUserTags": {
-        "smbname": "com.apple.metadata_kMDItemUserTags",
-        "text": apple_kmditemusertags,
-        "bytes": b64decode(apple_kmditemusertags)
-    },
-    "com.apple.metadata:kMDLabel_anemgxoe73iplck2hfuumqxdbu": {
-        "smbname": "com.apple.metadatakMDLabel_anemgxoe73iplck2hfuumqxdbu",
-        "text": apple_kmdlabel,
-        "bytes": b64decode(apple_kmdlabel)
-    },
-}
-
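-
-# The base64 blobs above are the raw xattr payloads the protocol tests write
-# and read back; decoding is done once up front so later comparisons are
-# byte-for-byte. Hedged usage sketch with the stdlib imports above:
-#
-#   raw = b64decode(netatalk_metadata)   # bytes as stored in the xattr
-#   assert raw == AFPXattr['org.netatalk.Metadata']['bytes']
-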
-
-@pytest.fixture(scope='module')
-def initialize_for_smb_tests(request):
-    with dataset('smb-proto', data={'share_type': 'SMB'}) as ds:
-        with create_user({
-            'username': SMB_USER,
-            'full_name': SMB_USER,
-            'group_create': True,
-            'password': SMB_PWD
-        }) as u:
-            try:
-                with smb_share(os.path.join('/mnt', ds), SMB_NAME, {
-                    'auxsmbconf': 'zfs_core:base_user_quota = 1G'
-                }) as s:
-                    try:
-                        call('service.start', 'cifs')
-                        yield {'dataset': ds, 'share': s, 'user': u}
-                    finally:
-                        call('service.stop', 'cifs')
-            finally:
-                # In test_140_enable_aapl we turned afp on for the share, so wait until
-                # it has been destroyed before turning off aapl_extensions.
-                call('smb.update', {
-                    'enable_smb1': False,
-                    'aapl_extensions': False
-                })
-
-
-@pytest.fixture(scope='module')
-def mount_share():
-    with smb_mount(TEST_DATA['share']['name'], SMB_USER, SMB_PWD) as mp:
-        yield {'mountpoint': mp}
-
-
-@pytest.mark.dependency(name="SMB_SHARE_CREATED")
-def test_001_initialize_smb_service(initialize_for_smb_tests):
-    TEST_DATA.update(initialize_for_smb_tests)
-
-
-def test_002_check_client_count(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    with smb_connection(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=False
-    ):
-        assert call("smb.client_count") == 1
-
-
-@pytest.mark.dependency(name="SHARE_IS_WRITABLE")
-def test_009_share_is_writable(request):
-    """
-    This test creates an empty file, sets the "delete on close" flag, then
-    closes it. NTSTATUSError should be raised containing failure details
-    if we are for some reason unable to access the share.
-
-    This test will fail if smb.conf / smb4.conf does not exist on client / server running test.
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("testfile", "w")
-    c.close(fd, True)
-    c.disconnect()
-
-
-@pytest.mark.parametrize('dm', DOSmode)
-def test_010_check_dosmode_create(request, dm):
-    """
-    This tests setting different DOS attributes through SMB2 create and
-    verifies them afterwards via a directory listing.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    if dm.value > DOSmode.SYSTEM.value:
-        return
-
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    if dm == DOSmode.READONLY:
-        c.create_file(dm.name, "w", "r")
-    elif dm == DOSmode.HIDDEN:
-        c.create_file(dm.name, "w", "h")
-    elif dm == DOSmode.SYSTEM:
-        c.create_file(dm.name, "w", "s")
-    dir_listing = c.ls("/")
-    for f in dir_listing:
-        if f['name'] != dm.name:
-            continue
-        # Archive is automatically set by kernel
-        to_check = f['attrib'] & ~DOSmode.ARCHIVE.value
-        c.disconnect()
-        assert (to_check & dm.value) != 0, f
-
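-
-# Hedged illustration of the masking above: the kernel sets ARCHIVE (0x20) on
-# newly created files, so it is stripped before checking the bit we requested.
-#
-#   attrib = DOSmode.ARCHIVE.value | DOSmode.HIDDEN.value   # e.g. 0x22 from ls()
-#   to_check = attrib & ~DOSmode.ARCHIVE.value              # 0x02
-#   assert to_check & DOSmode.HIDDEN.value                  # requested bit survived
-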
-
-def test_011_check_dos_ro_cred_handling(request):
-    """
-    This test creates a file with readonly attribute set, then
-    uses the open fd to write data to the file.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("RO_TEST", "w", "r")
-    c.write(fd, b"TESTING123\n")
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="SMB1_ENABLED")
-def test_050_enable_smb1(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    call("smb.update", {"enable_smb1": True})
-
-
-@pytest.mark.dependency(name="SHARE_IS_WRITABLE_SMB1")
-def test_051_share_is_writable_smb1(request):
-    """
-    This test creates an empty file, sets the "delete on close" flag, then
-    closes it. NTSTATUSError should be raised containing failure details
-    if we are for some reason unable to access the share.
-
-    This test will fail if client min protocol != NT1 in smb.conf of SMB client.
-    Sample smb.conf entry:
-
-    [global]
-    client min protocol = nt1
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("testfile", "w")
-    c.close(fd, True)
-    c.disconnect()
-
-
-@pytest.mark.parametrize('dm', DOSmode)
-def test_052_check_dosmode_create_smb1(request, dm):
-    """
-    This tests setting different DOS attributes through SMB1 create and
-    verifies them afterwards via a directory listing.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    if dm.value > DOSmode.SYSTEM.value:
-        return
-
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    if dm == DOSmode.READONLY:
-        c.create_file(f'{dm.name}_smb1', "w", "r")
-    elif dm == DOSmode.HIDDEN:
-        c.create_file(f'{dm.name}_smb1', "w", "h")
-    elif dm == DOSmode.SYSTEM:
-        c.create_file(f'{dm.name}_smb1', "w", "s")
-    dir_listing = c.ls("/")
-    for f in dir_listing:
-        if f['name'] != f'{dm.name}_smb1':
-            continue
-        # Archive is automatically set by kernel
-        to_check = f['attrib'] & ~DOSmode.ARCHIVE.value
-        c.disconnect()
-        assert (to_check & dm.value) != 0, f
-
-
-@pytest.mark.dependency(name="STREAM_TESTFILE_CREATED")
-def test_060_create_base_file_for_streams_tests(request):
-    """
-    Create the base file that we will use for further stream tests.
-    """
-    depends(request, ["SMB_SHARE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("streamstestfile", "w")
-    c.close(fd)
-    c.mkdir("streamstestdir")
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="STREAM_WRITTEN_SMB2")
-def test_061_create_and_write_stream_smb2(request, mount_share):
-    """
-    Create our initial stream and write to it over SMB2/3 protocol.
-    Start with offset 0.
-    """
-    depends(request, ["STREAM_TESTFILE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("streamstestfile:smb2_stream", "w")
-    c.write(fd, b'test1', 0)
-    c.close(fd)
-
-    fd2 = c.create_file("streamstestdir:smb2_stream", "w")
-    c.write(fd2, b'test2', 0)
-    c.close(fd2)
-
-    fd3 = c.create_file("streamstestfile:smb2_stream", "w")
-    contents = c.read(fd3, 0, 5)
-    c.close(fd3)
-    kcontent1 = get_stream('streamstestfile', 'smb2_stream')
-
-    fd4 = c.create_file("streamstestdir:smb2_stream", "w")
-    contents2 = c.read(fd4, 0, 5)
-    c.close(fd4)
-    kcontent2 = get_stream('streamstestdir', 'smb2_stream')
-
-    c.rmdir("streamstestdir")
-    c.disconnect()
-    assert (contents.decode() == "test1")
-    assert (contents2.decode() == "test2")
-
-    # Remove samba compatibility NULL byte
-    assert kcontent1[:-1].decode() == 'test1'
-    assert kcontent2[:-1].decode() == 'test2'
-
-
-@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB2")
-def test_062_write_stream_large_offset_smb2(request, mount_share):
-    """
-    Append to our existing stream over the SMB2/3 protocol. Specify an offset that will
-    cause the resulting xattr to exceed the 64KiB default xattr size limit in Linux.
-    """
-    depends(request, ["STREAM_TESTFILE_CREATED"])
-    with smb_connection(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=False
-    ) as c:
-        fd = c.create_file("streamstestfile:smb2_stream", "w")
-        try:
-            c.write(fd, b'test2', 131072)
-        finally:
-            c.close(fd)
-
-        fd2 = c.create_file("streamstestfile:smb2_stream", "w")
-        try:
-            contents = c.read(fd2, 131072, 5)
-        finally:
-            c.close(fd2)
-
-        kcontent = get_stream('streamstestfile', 'smb2_stream')
-
-        assert (contents.decode() == "test2")
-
-        # Verify that reading a large stream functions correctly
-        assert len(kcontent) == 131072 + 5 + 1
-
-        # Remove samba compatibility NULL byte
-        assert kcontent[131072:-1].decode() == 'test2'
-
-        # Verify that SMB server rejects too-large stream write
-        fd = c.create_file("streamstestfile:smb2_stream", "w")
-        try:
-            with pytest.raises(NTSTATUSError) as e:
-                c.write(fd, b'test2', 2097152)
-
-            assert e.value.args[0] == ntstatus.NT_STATUS_FILE_SYSTEM_LIMITATION
-        finally:
-            c.close(fd)
-
-        # Verify that SMB server allows _very_ large write
-        fd = c.create_file("streamstestfile:smb2_stream", "w")
-        try:
-            # We have to leave an extra byte for the trailing NUL at the end of the xattr
-            offset = 2097152 - (len(b"test2") + 1)
-            c.write(fd, b"test2", offset)
-            contents = c.read(fd, offset, 5)
-            assert contents.decode() == "test2"
-        finally:
-            c.close(fd)
-
-        copy_stream('streamstestfile', 'smb2_stream', 'smb2_stream2')
-
-        fd = c.create_file("streamstestfile:smb2_stream", "r")
-        try:
-            contents_stream1 = c.read(fd, 0, 2097152)
-        finally:
-            c.close(fd)
-
-        fd = c.create_file("streamstestfile:smb2_stream2", "r")
-        try:
-            contents_stream2 = c.read(fd, 0, 2097152)
-        finally:
-            c.close(fd)
-
-        assert contents_stream1 == contents_stream2
-
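-
-# Hedged arithmetic behind the size checks above: samba stores a trailing
-# compatibility NUL after the stream data, so writing b'test2' at offset
-# 131072 yields an xattr of 131072 + 5 + 1 bytes, and the largest permitted
-# write picks its offset so the final size lands exactly on the 2 MiB limit:
-#
-#   limit = 2097152
-#   offset = limit - (len(b'test2') + 1)   # 2097146
-#   assert offset + len(b'test2') + 1 == limit
-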
-
-def test_063_stream_delete_on_close_smb2(request):
-    """
-    Set delete_on_close on alternate datastream over SMB2/3 protocol, close, then verify
-    stream was deleted.
-
-    TODO: There is an open MR to expand the samba python bindings to support stream
-    enumeration. Verification of stream deletion will have to be added once it is merged.
-    """
-    depends(request, ["STREAM_WRITTEN_SMB2", "LARGE_STREAM_WRITTEN_SMB2"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file("streamstestfile:smb2_stream", "w")
-    c.close(fd, True)
-
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="STREAM_WRITTEN_SMB1")
-def test_065_create_and_write_stream_smb1(request):
-    """
-    Create our initial stream and write to it over SMB1 protocol.
-    Start with offset 0.
-    """
-    depends(request, ["STREAM_TESTFILE_CREATED"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("streamstestfile:smb1_stream", "w")
-    c.write(fd, b'test1', 0)
-    c.close(fd)
-
-    fd2 = c.create_file("streamstestfile:smb1_stream", "w")
-    contents = c.read(fd2, 0, 5)
-    c.close(fd2)
-    c.disconnect()
-    assert (contents.decode() == "test1")
-
-
-@pytest.mark.dependency(name="LARGE_STREAM_WRITTEN_SMB1")
-def test_066_write_stream_large_offset_smb1(request):
-    """
-    Append to our existing stream over the SMB1 protocol. Specify an offset that will
-    cause the resulting xattr to exceed the 64KiB default xattr size limit in Linux.
-    """
-    depends(request, ["STREAM_WRITTEN_SMB1"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("streamstestfile:smb1_stream", "w")
-    c.write(fd, b'test2', 131072)
-    c.close(fd)
-
-    fd2 = c.create_file("streamstestfile:smb1_stream", "w")
-    contents = c.read(fd2, 131072, 5)
-    c.close(fd2)
-    c.disconnect()
-    assert (contents.decode() == "test2")
-
-
-def test_067_stream_delete_on_close_smb1(request):
-    """
-    Set delete_on_close on alternate datastream over SMB1 protocol, close, then verify
-    stream was deleted.
-
-    TODO: I have an open MR to expand the samba python bindings to support stream enumeration.
-    Verification of stream deletion will have to be added once this is merged.
-    """
-    depends(request, ["STREAM_WRITTEN_SMB1", "LARGE_STREAM_WRITTEN_SMB1"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("streamstestfile:smb1_stream", "w")
-    c.close(fd, True)
-
-    c.disconnect()
-
-
-def test_068_case_insensitive_rename(request):
-    """
-    ZFS is case insensitive, but case preserving, when casesensitivity == insensitive.
-
-    Renaming to_rename -> To_rename should succeed and the new name should appear
-    correctly in the directory listing.
-
-    Will fail with NT_STATUS_OBJECT_NAME_COLLISION if we have a regression and
-    samba identifies the two names as the same file.
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    with smb_connection(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=False
-    ) as c:
-        fd = c.create_file("to_rename", "w")
-        c.close(fd)
-        c.rename("to_rename", "To_rename")
-        files = [x['name'] for x in c.ls('\\')]
-        assert "To_rename" in files
-        assert "to_rename" not in files
-
-        # MacOS Sonoma currently (Aug 2024) may obtain the SMB handle on the file to be
-        # renamed via the target name of the rename, which can hit an optimization in
-        # samba. This validates that a rename performed this way also works on
-        # case-insensitive filesystems.
-        c.rename("to_rename", "to_rename")
-        files = [x['name'] for x in c.ls('\\')]
-        assert "to_rename" in files
-        assert "To_rename" not in files
-
-
-def test_069_normal_rename(request):
-    """
-    This verifies that renames are successfully completed
-    """
-    depends(request, ["SHARE_IS_WRITABLE"])
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=True)
-    fd = c.create_file("old_file_to_rename", "w")
-    c.close(fd)
-    c.rename("old_file_to_rename", "renamed_new_file")
-    files = [x['name'] for x in c.ls('\\')]
-    c.disconnect()
-    assert ("renamed_new_file" in files)
-
-
-"""
-At this point we grant SMB_USER SeDiskOperatorPrivilege by making it a member
-of the local group builtin_administrators. This privilege is required to manipulate
-SMB quotas.
-"""
-
-
-@pytest.mark.dependency(name="BA_ADDED_TO_USER")
-def test_089_add_to_builtin_admins(request):
-    depends(request, ["SHARE_IS_WRITABLE"])
-    smbuser_id = TEST_DATA['user']['id']
-    ba = call(
-        'group.query',
-        [['group', '=', 'builtin_administrators']],
-        {'get': True}
-    )
-    userinfo = call('user.query', [['id', '=', smbuser_id]], {'get': True})
-    groups = userinfo['groups']
-    groups.append(ba['id'])
-    call("user.update", smbuser_id, {'groups': groups})
-
-
-@pytest.mark.parametrize('proto', ["SMB2"])
-def test_090_test_auto_smb_quota(request, proto):
-    """
-    Since the share is configured with the ixnas:base_user_quota parameter,
-    the first SMB tree connect should have set a ZFS user quota on the
-    underlying dataset. Test querying through the SMB protocol.
-
-    Currently SMB1 protocol is disabled because of hard-coded check in
-    source3/smbd/nttrans.c to only allow root to get/set quotas.
-    """
-    depends(request, ["BA_ADDED_TO_USER"])
-    c = SMB()
-    qt = c.get_quota(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=(proto == "SMB1")
-    )
-
-    # There should only be one quota entry
-    assert len(qt) == 1, qt
-
-    # username is prefixed with server netbios name "SERVER\user"
-    assert qt[0]['user'].endswith(SMB_USER), qt
-
-    # Hard and soft limits should be set to the value noted above (1 GiB)
-    assert qt[0]['soft_limit'] == (2 ** 30), qt
-    assert qt[0]['hard_limit'] == (2 ** 30), qt
-
-
-def test_091_remove_auto_quota_param(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    call('sharing.smb.update', TEST_DATA['share']['id'], {
-        'auxsmbconf': ''
-    })
-
-
-@pytest.mark.parametrize('proto', ["SMB2"])
-def test_092_set_smb_quota(request, proto):
-    """
-    This test checks our ability to set a ZFS quota
-    through the SMB protocol by first setting a 2 GiB
-    quota, then reading it through the SMB protocol, then
-    resetting to zero.
-    """
-    depends(request, ["BA_ADDED_TO_USER"])
-    new_quota = 2 * (2**30)
-    c = SMB()
-    qt = c.set_quota(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        hardlimit=new_quota,
-        target=SMB_USER,
-        smb1=(proto == "SMB1")
-    )
-    assert len(qt) == 1, qt
-    assert qt[0]['user'].endswith(SMB_USER), qt
-    assert qt[0]['soft_limit'] == new_quota, qt
-    assert qt[0]['hard_limit'] == new_quota, qt
-
-    qt = c.get_quota(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=(proto == "SMB1")
-    )
-    assert len(qt) == 1, qt
-    assert qt[0]['user'].endswith(SMB_USER), qt
-    assert qt[0]['soft_limit'] == new_quota, qt
-    assert qt[0]['hard_limit'] == new_quota, qt
-
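-    # A hard limit of -1 removes the quota; both limits should then read
-    # back as None.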
-    qt = c.set_quota(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        hardlimit=-1,
-        target=SMB_USER,
-        smb1=(proto == "SMB1")
-    )
-    assert len(qt) == 1, qt
-    assert qt[0]['user'].endswith(SMB_USER), qt
-    assert qt[0]['soft_limit'] is None, qt
-    assert qt[0]['hard_limit'] is None, qt
-
-    qt = c.get_quota(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=(proto == "SMB1")
-    )
-    assert len(qt) == 1, qt
-    assert qt[0]['user'].endswith(SMB_USER), qt
-    assert qt[0]['soft_limit'] is None, qt
-    assert qt[0]['hard_limit'] is None, qt
-
-
-def test_95_strip_quota(request):
-    """
-    This test removes any quota set for the test smb user
-    """
-    depends(request, ["BA_ADDED_TO_USER"])
-    call('pool.dataset.set_quota', TEST_DATA['dataset'], [{
-        'quota_type': 'USER',
-        'id': SMB_USER,
-        'quota_value': 0
-    }])
-
-
-@pytest.mark.dependency(name="AFP_ENABLED")
-def test_140_enable_aapl(request):
-    depends(request, ["SMB_SHARE_CREATED"])
-    call('smb.update', {'aapl_extensions': True})
-    call('sharing.smb.update', TEST_DATA['share']['id'], {
-        'afp': True,
-    })
-
-
-@pytest.mark.dependency(name="SSH_XATTR_SET")
-@pytest.mark.parametrize('xat', AFPXattr.keys())
-def test_151_set_xattr_via_ssh(request, xat):
-    """
-    Iterate through AFP xattrs and set them on testfile
-    via SSH.
-    """
-    depends(request, ["AFP_ENABLED"], scope="session")
-    smb_path = TEST_DATA['share']['path']
-    afptestfile = f'{smb_path}/afp_xattr_testfile'
-    cmd = f'touch {afptestfile} && chown {SMB_USER} {afptestfile} && '
-    cmd += f'echo -n \"{AFPXattr[xat]["text"]}\" | base64 -d | '
-    cmd += f'attr -q -s {xat} {afptestfile}'
-
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, {"cmd": cmd, "res": results['output']}
-
-
-@pytest.mark.dependency(name="XATTR_CHECK_SMB_READ")
-@pytest.mark.parametrize('xat', AFPXattr.keys())
-def test_152_check_xattr_via_smb(request, mount_share, xat):
-    """
-    Read xattr that was written via SSH and verify that
-    data is same when viewed over SMB.
-    """
-    depends(request, ["SSH_XATTR_SET"])
-    afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
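-    # org.netatalk.Metadata gets extra metadata written to it server-side
-    # (see the note in test_155 below), so compare against the expected
-    # SMB-side bytes rather than the raw payload.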
-    bytes_to_read = AFPXattr[xat]["smb_bytes"] if xat == "org.netatalk.Metadata" else AFPXattr[xat]["bytes"]
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file(afptestfile, "w")
-    xat_bytes = c.read(fd, 0, len(bytes_to_read) + 1)
-    c.close(fd)
-    c.disconnect()
-
-    err = {
-        "name": xat,
-        "b64data": b64encode(xat_bytes)
-    }
-
-    # Python base64 library appends a `\t` to end of byte string
-    assert xat_bytes == bytes_to_read, str(err)
-
-    # Check via kernel client.
-    kcontent = get_stream('afp_xattr_testfile', AFPXattr[xat]['smbname'])
-    err = {
-        "name": xat,
-        "b64data": b64encode(kcontent[:-1])
-    }
-    assert kcontent[:-1] == bytes_to_read, str(err)
-
-
-@pytest.mark.dependency(name="XATTR_CHECK_SMB_UNLINK")
-@pytest.mark.parametrize('xat', AFPXattr.keys())
-def test_153_unlink_xattr_via_smb(request, xat):
-    """
-    Open AFP xattr, set "delete on close" flag, then close.
-    """
-    depends(request, ["XATTR_CHECK_SMB_READ"])
-    afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file(afptestfile, "w")
-    c.close(fd, True)
-    c.disconnect()
-
-
-@pytest.mark.dependency(name="XATTR_CHECK_SMB_WRITE")
-@pytest.mark.parametrize('xat', AFPXattr.keys())
-def test_154_write_afp_xattr_via_smb(request, xat):
-    """
-    Write xattr over SMB
-    """
-    depends(request, ["XATTR_CHECK_SMB_UNLINK"])
-    afptestfile = f'afp_xattr_testfile:{AFPXattr[xat]["smbname"]}'
-    payload = AFPXattr[xat]["smb_bytes"] if xat == "org.netatalk.Metadata" else AFPXattr[xat]["bytes"]
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-    fd = c.create_file(afptestfile, "w")
-    c.write(fd, payload)
-    c.close(fd)
-    c.disconnect()
-
-
-@pytest.mark.parametrize('xat', AFPXattr.keys())
-def test_155_ssh_read_afp_xattr(request, xat):
-    """
-    Read xattr that was set via SMB protocol directly via
-    SSH and verify that data is the same.
-    """
-    depends(request, ["XATTR_CHECK_SMB_WRITE"], scope="session")
-    # Netatalk-compatible xattr gets additional
-    # metadata written to it, which makes comparison
-    # of all bytes problematic.
-    if xat == "org.netatalk.Metadata":
-        return
-
-    smb_path = TEST_DATA['share']['path']
-    afptestfile = f'{smb_path}/afp_xattr_testfile'
-    cmd = f'attr -q -g {xat} {afptestfile} | base64'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-    xat_data = b64decode(results['stdout'])
-    assert AFPXattr[xat]['bytes'] == xat_data, results['output']
-
-
-def test_175_check_external_path(request):
-    with smb_share(f'EXTERNAL:{truenas_server.ip}\\{SMB_NAME}', 'EXTERNAL'):
-        with smb_connection(
-            share=SMB_NAME,
-            username=SMB_USER,
-            password=SMB_PWD,
-            smb1=False
-        ) as c:
-            fd = c.create_file('external_test_file', "w")
-            c.write(fd, b'EXTERNAL_TEST')
-            c.close(fd)
-
-        cmd = f'smbclient //127.0.0.1/EXTERNAL -U {SMB_USER}%{SMB_PWD} '
-        cmd += '-c "get external_test_file"'
-        ssh(cmd)
-
-        results = SSH_TEST('cat external_test_file', user, password)
-        assert results['result'] is True, results['output']
-        assert results['stdout'] == 'EXTERNAL_TEST'
-
-
-def test_176_check_dataset_auto_create(request):
-    with dataset('smb_proto_nested_datasets', data={'share_type': 'SMB'}) as ds:
-        ds_mp = os.path.join('/mnt', ds)
-        with smb_share(ds_mp, 'DATASETS', {'purpose': 'PRIVATE_DATASETS'}):
-            with smb_connection(
-                share='DATASETS',
-                username=SMB_USER,
-                password=SMB_PWD,
-                smb1=False
-            ) as c:
-                fd = c.create_file('nested_test_file', "w")
-                c.write(fd, b'EXTERNAL_TEST')
-                c.close(fd)
-
-        acl = call('filesystem.getacl', os.path.join(ds_mp, SMB_USER), True)
-        assert acl['trivial'] is False, str(acl)
-
-
-def test_180_create_share_multiple_dirs_deep(request):
-    with dataset('nested_dirs', data={'share_type': 'SMB'}) as ds:
-        dirs_path = os.path.join('/mnt', ds, 'd1/d2/d3')
-        ssh(f'mkdir -p {dirs_path}')
-
-        with smb_share(dirs_path, 'DIRS'):
-            with smb_connection(
-                share='DIRS',
-                username=SMB_USER,
-                password=SMB_PWD,
-                smb1=False
-            ) as c:
-                fd = c.create_file('nested_dirs_file', "w")
-                c.write(fd, b'DIRS_TEST')
-                c.close(fd)
-
-        call('filesystem.stat', os.path.join(dirs_path, 'nested_dirs_file'))
-
-
-def test_181_create_and_disable_share(request):
-    with dataset('smb_disabled', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), 'TO_DISABLE') as tmp_share:
-            with smb_connection(
-                share='TO_DISABLE',
-                username=SMB_USER,
-                password=SMB_PWD,
-                smb1=False
-            ) as c:
-                call('sharing.smb.update', tmp_share['id'], {'enabled': False})
-                try:
-                    c.create_file('canary', "w")
-                except NTSTATUSError as status:
-                    assert status.args[0] == ntstatus.NT_STATUS_NETWORK_NAME_DELETED, str(status)
-                else:
-                    assert c.connected is True
diff --git a/tests/api2/test_426_smb_vss.py b/tests/api2/test_426_smb_vss.py
deleted file mode 100644
index a61253178b7c4..0000000000000
--- a/tests/api2/test_426_smb_vss.py
+++ /dev/null
@@ -1,317 +0,0 @@
-from subprocess import run
-from time import sleep
-
-import pytest
-from pytest_dependency import depends
-
-# DO NOT change the order of these imports
-# or the ntstatus import will fail
-from protocols import SMB
-from samba import ntstatus
-
-from auto_config import pool_name, user, password
-from functions import SSH_TEST
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import truenas_server
-
-
-dataset = f"{pool_name}/smb-vss"
-dataset_nested = f"{dataset}/sub1"
-
-SMB_NAME = "SMBVSS"
-smb_path = "/mnt/" + dataset
-
-SMB_USER = "smbshadowuser"
-SMB_PWD = "smb1234"
-
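-# Files checked for previous versions: one in the share root, one in a
-# subdirectory, and one in a nested dataset (see the test_010 docstring).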
-to_check = [
-    'testfile1',
-    f'{SMB_USER}/testfile2',
-    'sub1/testfile3'
-]
-
-snapshots = {
-    'snapshot1': {'gmt_string': '', 'offset': 18},
-    'snapshot2': {'gmt_string': '', 'offset': 36},
-    'snapshot3': {'gmt_string': '', 'offset': 54},
-}
-
-
-def check_previous_version_exists(path, home=False):
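-    # Attempt to open the given previous-version path with smbclient and
-    # return a tuple of (ntstatus code, status name, message); a return of
-    # 0 / NT_STATUS_OK means the previous version of the file exists.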
-    ip = truenas_server.ip
-    cmd = [
-        'smbclient',
-        f'//{ip}/{SMB_NAME if not home else SMB_USER}',
-        '-U', f'{SMB_USER}%{SMB_PWD}',
-        '-c' f'open {path}'
-    ]
-    cli_open = run(cmd, capture_output=True)
-    if cli_open.returncode != 0:
-        return (
-            ntstatus.NT_STATUS_FAIL_CHECK,
-            'NT_STATUS_FAIL_CHECK',
-            cli_open.stderr.decode()
-        )
-
-    cli_output = cli_open.stdout.decode().strip()
-    if 'NT_STATUS_' not in cli_output:
-        return (0, 'NT_STATUS_OK', cli_output)
-
-    cli_output = cli_output.rsplit(' ', 1)
-
-    return (
-        ntstatus.__getattribute__(cli_output[1]),
-        cli_output[1],
-        cli_output[0]
-    )
-
-
-"""
-def check_previous_version_contents(path, contents, offset):
-    cmd = [
-        'smbclient',
-        f'//{truenas_server.ip}/{SMB_NAME}',
-        '-U', f'{SMB_USER}%{SMB_PWD}',
-        '-c' f'prompt OFF; mget {path}'
-    ]
-    cli_get = run(cmd, capture_output=True)
-    if cli_get.returncode != 0:
-        return (
-            ntstatus.NT_STATUS_FAIL_CHECK,
-            'NT_STATUS_FAIL_CHECK',
-            cli_get.stderr.decode()
-        )
-
-    cli_output = cli_get.stdout.decode().strip()
-    if 'NT_STATUS_' in cli_output:
-        cli_output = cli_output.rsplit(' ', 1)
-        return (
-            ntstatus.__getattribute__(cli_output[1]),
-            cli_output[0]
-        )
-
-    with open(path[25:], "rb") as f:
-        bytes = f.read()
-
-    to_check = bytes[offset:]
-    assert len(to_check) == 9, f'path: {path}, contents: {to_check.decode()}'
-    os.unlink(path[25:])
-    assert to_check.decode() == contents, path
-    return (0, )
-"""
-
-
-@pytest.mark.parametrize('ds', [dataset, dataset_nested])
-@pytest.mark.dependency(name="VSS_DATASET_CREATED")
-def test_001_creating_smb_dataset(request, ds):
-    assert call("pool.dataset.create", {"name": ds, "share_type": "SMB"})
-    assert call("zfs.snapshot.create", {"dataset": ds, "name": "init"})
-
-
-@pytest.mark.dependency(name="VSS_USER_CREATED")
-def test_002_creating_shareuser_to_test_acls(request):
-    depends(request, ['VSS_DATASET_CREATED'])
-
-    global vssuser_id
-    global next_uid
-    next_uid = call('user.get_next_uid')
-    vssuser_id = call("user.create", {
-        "username": SMB_USER,
-        "full_name": "SMB User",
-        "group_create": True,
-        "password": SMB_PWD,
-        "uid": next_uid,
-    })
-
-
-def test_003_changing_dataset_owner(request):
-    depends(request, ["VSS_USER_CREATED"])
-    call(
-        "filesystem.chown",
-        {
-            'path': smb_path,
-            'uid': next_uid,
-            'options': {'recursive': True, 'traverse': True},
-        },
-        job=True
-    )
-
-
-@pytest.mark.dependency(name="VSS_SHARE_CREATED")
-def test_004_creating_a_smb_share_path(request):
-    depends(request, ["VSS_DATASET_CREATED"], scope="session")
-    global payload, results, smb_id
-    smb_id = call("sharing.smb.create", {
-        "comment": "SMB VSS Testing Share",
-        "path": smb_path,
-        "name": SMB_NAME,
-        "purpose": "NO_PRESET",
-    })['id']
-    cmd = f'mkdir {smb_path}/{SMB_USER}; zpool sync; net cache flush'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, {"cmd": cmd, "res": results['output']}
-
-
-@pytest.mark.dependency(name="VSS_SMB_SERVICE_STARTED")
-def test_005_starting_cifs_service(request):
-    depends(request, ["VSS_SHARE_CREATED"])
-    assert call("service.start", "cifs")
-
-
-@pytest.mark.dependency(name="VSS_SMB1_ENABLED")
-def test_006_enable_smb1(request):
-    depends(request, ["VSS_SHARE_CREATED"])
-    assert call("smb.update", {"enable_smb1": True})
-
-
-@pytest.mark.dependency(name="SHARE_HAS_SHADOW_COPIES")
-@pytest.mark.parametrize('proto', ["SMB1", "SMB2"])
-def test_007_check_shadow_copies(request, proto):
-    """
-    This is very basic validation of the presence of a snapshot
-    over SMB1 and SMB2/3.
-    """
-    depends(request, ["VSS_USER_CREATED"])
-    c = SMB()
-    snaps = c.get_shadow_copies(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=(proto == "SMB1")
-    )
-    assert len(snaps) == 1, snaps
-
-
-@pytest.mark.dependency(name="VSS_TESTFILES_CREATED")
-@pytest.mark.parametrize('payload', [
-    'snapshot1', 'snapshot2', 'snapshot3'
-])
-def test_008_set_up_testfiles(request, payload):
-    depends(request, ["SHARE_HAS_SHADOW_COPIES"])
-    i = int(payload[-1])
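-    # len(payload) is 9, so the offsets work out to 18/36/54 for
-    # snapshot1/2/3, matching the 'offset' values in the snapshots dict above.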
-    offset = i * 2 * len(payload)
-    c = SMB()
-    c.connect(share=SMB_NAME, username=SMB_USER, password=SMB_PWD, smb1=False)
-
-    for f in to_check:
-        fd = c.create_file(f, "w")
-        c.write(fd, payload.encode(), offset)
-        c.close(fd)
-
-        fd = c.create_file(f'{f}:smb2_stream', 'w')
-        c.write(fd, payload.encode(), offset)
-        c.close(fd)
-
-    sleep(5)
-    assert call("zfs.snapshot.create", {
-        "dataset": dataset,
-        "name": payload,
-        "recursive": True,
-    })
-
-
-@pytest.mark.parametrize('proto', ["SMB1", "SMB2"])
-def test_009_check_shadow_copies_count_after_setup(request, proto):
-    """
-    This is very basic validation of the presence of snapshots
-    over SMB1 and SMB2/3.
-    """
-    depends(request, ["VSS_USER_CREATED"])
-    c = SMB()
-    snaps = c.get_shadow_copies(
-        share=SMB_NAME,
-        username=SMB_USER,
-        password=SMB_PWD,
-        smb1=(proto == "SMB1")
-    )
-    assert len(snaps) == 4, snaps
-    snaps.sort()
-    for idx, gmt in enumerate(snaps[1:]):
-        snapshots[f'snapshot{idx + 1}']['gmt_string'] = gmt
-
-
-@pytest.mark.dependency(name="VSS_TESTFILES_VALIDATED")
-@pytest.mark.parametrize('zfs, gmt_data', snapshots.items())
-def test_010_check_previous_versions_of_testfiles(request, zfs, gmt_data):
-    """
-    This test verifies that previous versions of files can be opened successfully
-    in the following situations:
-    1) root of share
-    2) subdirectory in share
-    3) child dataset in share
-
-    In (1) - (3) above, the ability to open alternate data streams is also verified.
-    """
-    depends(request, ["VSS_TESTFILES_CREATED"])
-
-    vers = gmt_data['gmt_string']
-    for f in to_check:
-        the_file = f'{vers}/{f}'
-        err, errstr, msg = check_previous_version_exists(the_file)
-        assert err == 0, f'{the_file}: {errstr} - {msg}'
-
-        """
-        # further development of libsmb / smbclient required for this test
-        # best bet is to add a kwarg to py-libsmb create to allow opening a
-        # previous version of a file.
-        err, msg = check_previous_version_contents(the_file, zfs, gmt_data['offset'])
-        assert err == 0, f'{the_file}: {msg}'
-        """
-        err, errstr, msg = check_previous_version_exists(f'{the_file}:smb2_stream')
-        assert err == 0, f'{the_file}:smb2_stream: {errstr} - {msg}'
-
-
-def test_011_convert_to_home_share(request):
-    depends(request, ["VSS_TESTFILES_VALIDATED"])
-    assert call("sharing.smb.update", smb_id, {"home": True})
-
-
-@pytest.mark.parametrize('zfs, gmt_data', snapshots.items())
-def test_012_check_previous_versions_of_testfiles_home_share(request, zfs, gmt_data):
-    """
-    This test verifies that previous versions of files can be opened successfully
-    in the following situations:
-    1) root of share
-    2) subdirectory in share
-    3) child dataset in share
-
-    In (1) - (3) above, the ability to open alternate data streams is also verified.
-    Differs from the previous test in that this one covers a "home" share, which is
-    rooted at a directory inside a ZFS dataset, meaning that internally samba's cwd
-    has to change to a path outside of the share root.
-    """
-    depends(request, ["VSS_TESTFILES_VALIDATED"])
-    the_file = f'{gmt_data["gmt_string"]}/testfile2'
-    err, errstr, msg = check_previous_version_exists(the_file, True)
-    assert err == 0, f'{the_file}: {errstr} - {msg}'
-
-
-def test_050_delete_smb_user(request):
-    depends(request, ["VSS_USER_CREATED"])
-    call("user.delete", vssuser_id, {"delete_group": True})
-    call("sharing.smb.delete", smb_id)
-
-
-def test_051_disable_smb1(request):
-    depends(request, ["VSS_SMB1_ENABLED"])
-    assert call("smb.update", {"enable_smb1": False, "aapl_extensions": False})
-
-
-def test_052_stopping_smb_service(request):
-    depends(request, ["VSS_SMB_SERVICE_STARTED"])
-    assert call("service.stop", "cifs")
-    sleep(1)
-
-
-def test_053_checking_if_smb_is_stopped(request):
-    depends(request, ["VSS_SMB_SERVICE_STARTED"])
-    assert call(
-        "service.query",
-        [["service", "=", "cifs"]],
-        {"get": True},
-    )["state"] == "STOPPED"
-
-
-def test_054_destroying_smb_dataset(request):
-    depends(request, ["VSS_DATASET_CREATED"])
-    call("pool.dataset.delete", dataset, {'recursive': True})
diff --git a/tests/api2/test_427_smb_acl.py b/tests/api2/test_427_smb_acl.py
deleted file mode 100644
index a7f2db12a0460..0000000000000
--- a/tests/api2/test_427_smb_acl.py
+++ /dev/null
@@ -1,298 +0,0 @@
-#!/usr/bin/env python3
-
-import errno
-import pytest
-import sys
-import os
-import secrets
-import string
-import subprocess
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from auto_config import (
-    pool_name,
-)
-from middlewared.service_exception import ValidationError, ValidationErrors
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.smb import security, smb_connection
-from middlewared.test.integration.utils.unittest import RegexString
-from pytest_dependency import depends
-from time import sleep
-from utils import create_dataset
-
-
-SMB_USER = "smbacluser"
-SMB_PWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-TEST_DATA = {}
-OWNER_RIGHTS_SID = 'S-1-3-4'
-
-
-permset = {
-    "READ_DATA": False,
-    "WRITE_DATA": False,
-    "APPEND_DATA": False,
-    "READ_NAMED_ATTRS": False,
-    "WRITE_NAMED_ATTRS": False,
-    "EXECUTE": False,
-    "DELETE_CHILD": False,
-    "READ_ATTRIBUTES": False,
-    "WRITE_ATTRIBUTES": False,
-    "DELETE": False,
-    "READ_ACL": False,
-    "WRITE_ACL": False,
-    "WRITE_OWNER": False,
-    "SYNCHRONIZE": True
-}
-
-flagset = {
-    "FILE_INHERIT": False,
-    "DIRECTORY_INHERIT": False,
-    "INHERIT_ONLY": False,
-    "NO_PROPAGATE_INHERIT": False,
-    "INHERITED": False
-}
-
-
-def get_windows_sd(share, format="LOCAL"):
-    return call("smb.get_remote_acl", {
-        "server": "127.0.0.1",
-        "share": share,
-        "username": SMB_USER,
-        "password": SMB_PWD,
-        "options": {"output_format": format}
-    })['acl_data']
-
-
-def iter_permset(path, share, local_acl):
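-    # Toggle each NFSv4 permission bit in turn on the local ACL entry with
-    # id 666 and verify that the matching bit appears in the ACL read over SMB.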
-    smbacl = get_windows_sd(share)
-    assert smbacl['acl'][0]['perms'] == permset
-    for perm in permset.keys():
-        permset[perm] = True
-        call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True)
-        smbacl = get_windows_sd(share)
-        for ace in smbacl["acl"]:
-            if ace["id"] != 666:
-                continue
-
-            assert ace["perms"] == permset, f'{perm}: {str(ace)}'
-
-
-def iter_flagset(path, share, local_acl):
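-    # Same approach as iter_permset, but for the inheritance flags.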
-    smbacl = get_windows_sd(share)
-    assert smbacl['acl'][0]['flags'] == flagset
-    for flag in flagset.keys():
-        # we automatically canonicalize entries and so INHERITED shifts to end of list
-        flagset[flag] = True
-        call('filesystem.setacl', {'path': path, "dacl": local_acl}, job=True)
-        smbacl = get_windows_sd(share)
-        for ace in smbacl["acl"]:
-            if ace["id"] != 666:
-                continue
-
-            assert ace["flags"] == flagset, f'{flag}: {str(ace)}'
-
-
-@pytest.fixture(scope='module')
-def initialize_for_smb_tests(request):
-    ba = call(
-        'group.query',
-        [['name', '=', 'builtin_administrators']],
-        {'get': True}
-    )
-    with user({
-        'username': SMB_USER,
-        'full_name': SMB_USER,
-        'group_create': True,
-        'smb': True,
-        'groups': [ba['id']],
-        'password': SMB_PWD
-    }) as u:
-        try:
-            call('service.start', 'cifs')
-            yield {'user': u}
-        finally:
-            call('service.stop', 'cifs')
-
-
-@pytest.mark.dependency(name="SMB_SERVICE_STARTED")
-def test_001_initialize_for_tests(initialize_for_smb_tests):
-    TEST_DATA.update(initialize_for_smb_tests)
-
-
-def test_003_test_perms(request):
-    """
-    This test creates a temporary dataset / SMB share,
-    then iterates through all the possible permissions bits
-    setting local FS ace for each of them and verifies that
-    correct NT ACL bit gets toggled when viewed through SMB
-    protocol.
-    """
-    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
-
-    with dataset('nfs4acl_perms_smb', {'share_type': 'SMB'}) as ds:
-        path = os.path.join('/mnt', ds)
-        with smb_share(path, "PERMS"):
-            the_acl = call('filesystem.getacl', path, False)['acl']
-            the_acl.insert(0, {
-                'perms': permset,
-                'flags': flagset,
-                'id': 666,
-                'type': 'ALLOW',
-                'tag': 'USER'
-            })
-            call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True)
-            iter_permset(path, "PERMS", the_acl)
-
-
-def test_004_test_flags(request):
-    """
-    This test creates a temporary dataset / SMB share,
-    then iterates through all the possible inheritance flags
-    setting local FS ace for each of them and verifies that
-    correct NT ACL bit gets toggled when viewed through SMB
-    protocol.
-    """
-    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
-
-    with dataset('nfs4acl_flags_smb', {'share_type': 'SMB'}) as ds:
-        path = os.path.join('/mnt', ds)
-        with smb_share(path, "FLAGS"):
-            the_acl = call('filesystem.getacl', path, False)['acl']
-            the_acl.insert(0, {
-                'perms': permset,
-                'flags': flagset,
-                'id': 666,
-                'type': 'ALLOW',
-                'tag': 'USER'
-            })
-            call('filesystem.setacl', {'path': path, "dacl": the_acl}, job=True)
-            iter_flagset(path, "FLAGS", the_acl)
-
-
-def test_005_test_map_modify(request):
-    """
-    This test validates that we are generating an appropriate SD when the user has
-    'stripped' an ACL from an SMB share. Appropriate in this case means one that
-    grants an access mask equivalent to MODIFY or FULL depending on whether it's
-    the file owner or group / other.
-    """
-    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
-
-    ds = 'nfs4acl_map_modify'
-    path = f'/mnt/{pool_name}/{ds}'
-    with create_dataset(f'{pool_name}/{ds}', {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'}, None, '777'):
-        with smb_share(path, "MAP_MODIFY"):
-            sd = get_windows_sd("MAP_MODIFY", "SMB")
-            dacl = sd['dacl']
-            assert dacl[0]['access_mask']['standard'] == 'FULL', str(dacl[0])
-            assert dacl[1]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[1])
-            assert dacl[1]['access_mask']['special']['WRITE_EA'], str(dacl[1])
-            assert dacl[2]['access_mask']['special']['WRITE_ATTRIBUTES'], str(dacl[2])
-            assert dacl[2]['access_mask']['special']['WRITE_EA'], str(dacl[2])
-
-
-def test_006_test_preserve_dynamic_id_mapping(request):
-    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
-
-    def _find_owner_rights(acl, owner_rights_id):
-        for entry in acl:
-            if entry['id'] == owner_rights_id:
-                return True
-
-        return False
-
-    ds = 'nfs4acl_dynamic_user'
-    path = f'/mnt/{pool_name}/{ds}'
-    with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}):
-        with smb_share(path, "DYNAMIC"):
-            # add an ACL entry that forces generation
-            # of a dynamic idmap entry
-            sleep(5)
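-            # S-1-3-4 is the well-known "Owner Rights" SID (OWNER_RIGHTS_SID above).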
-            cmd = [
-                'smbcacls',
-                f'//{truenas_server.ip}/DYNAMIC',
-                '\\',
-                '-a', r'ACL:S-1-3-4:ALLOWED/0x0/FULL',
-                '-d', '0',
-                '-U', f'{SMB_USER}%{SMB_PWD}',
-            ]
-            res = subprocess.run(cmd, capture_output=True)
-            assert res.returncode == 0, res.stderr.decode() or res.stdout.decode()
-
-            # Since winbindd is by default not in nsswitch when we're standalone
-            # the GID won't resolve to name
-            res = call('idmap.convert_sids', [OWNER_RIGHTS_SID])
-            assert OWNER_RIGHTS_SID in res['mapped']
-            assert res['mapped'][OWNER_RIGHTS_SID]['id_type'] == 'GROUP'
-            assert res['mapped'][OWNER_RIGHTS_SID]['name'].endswith('Owner Rights')
-            owner_rights_id = res['mapped'][OWNER_RIGHTS_SID]['id']
-
-            # verify "owner rights" entry is present
-            the_acl = call('filesystem.getacl', path, False, True)['acl']
-            has_owner_rights = _find_owner_rights(the_acl, owner_rights_id)
-            assert has_owner_rights is True, str(the_acl)
-
-            # force re-sync of group mapping database (and winbindd_idmap.tdb)
-            call('smb.synchronize_group_mappings', job=True)
-
-            # verify "owner rights" entry is still present
-            the_acl = call('filesystem.getacl', path, False, True)['acl']
-            has_owner_rights = _find_owner_rights(the_acl, owner_rights_id)
-            assert has_owner_rights is True, str(the_acl)
-
-
-def test_007_test_disable_autoinherit(request):
-    depends(request, ["SMB_SERVICE_STARTED"], scope="session")
-    ds = 'nfs4acl_disable_inherit'
-    path = f'/mnt/{pool_name}/{ds}'
-    with create_dataset(f'{pool_name}/{ds}', {'share_type': 'SMB'}):
-        with smb_share(path, 'NFS4_INHERIT'):
-            with smb_connection(
-                share='NFS4_INHERIT',
-                username=SMB_USER,
-                password=SMB_PWD
-            ) as c:
-                c.mkdir('foo')
-                fh = c.create_file('foo', 'r')
-                sd = c.get_sd(fh, security.SECINFO_DACL)
-                c.close(fh)
-                assert sd.type & security.SEC_DESC_DACL_PROTECTED == 0, sd.as_sddl()
-                c.inherit_acl('foo', 'COPY')
-                fh = c.create_file('foo', 'r')
-                sd = c.get_sd(fh, security.SECINFO_DACL)
-                assert sd.type & security.SEC_DESC_DACL_PROTECTED, sd.as_sddl()
-
-
-def test_008_test_prevent_smb_dataset_update(request):
-    """
-    Prevent changing acltype and xattr on dataset hosting SMB shares
-    """
-    ds_name = 'prevent_changes'
-    path = f'/mnt/{pool_name}/{ds_name}'
-    with create_dataset(f'{pool_name}/{ds_name}') as ds:
-        with smb_share(path, 'SMB_SHARE_1'):
-            # Create a second share for testing purposes
-            with smb_share(path, 'SMB_SHARE_2'):
-
-                # Confirm we ignore requests that don't involve changes
-                for setting in [{"acltype": "POSIX"}]:
-                    call('pool.dataset.update', ds, setting)
-
-                # Confirm we block requests that involve changes
-                for setting in [{"acltype": "OFF"}]:
-                    attrib = list(setting.keys())[0]
-                    with pytest.raises(ValidationErrors) as ve:
-                        call('pool.dataset.update', ds, setting)
-                    assert ve.value.errors == [
-                        ValidationError(
-                            f"pool_dataset_update.{attrib}",
-                            RegexString("This dataset is hosting SMB shares. .*"),
-                            errno.EINVAL,
-                        )
-                    ]
-                    assert "SMB_SHARE_2" in str(ve.value.errors[0]), ve.value.errors[0]
diff --git a/tests/api2/test_428_smb_rpc.py b/tests/api2/test_428_smb_rpc.py
deleted file mode 100644
index 64d0d4cda254f..0000000000000
--- a/tests/api2/test_428_smb_rpc.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import os
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from protocols import MS_RPC
-
-
-SMB_USER = "smbrpcuser"
-SMB_PWD = "smb1234#!@"
-INVALID_SHARE_NAME_CHARACTERS = {'%', '<', '>', '*', '?', '|', '/', '\\', '+', '=', ';', ':', '"', ',', '[', ']'}
-
-@pytest.fixture(scope="module")
-def setup_smb_share(request):
-    with dataset('rpc_test', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), "RPC_TEST", {"abe": True, "purpose": "NO_PRESET"}) as s:
-            yield {'dataset': ds, 'share': s}
-
-@pytest.fixture(autouse=True, scope="function")
-def setup_smb_user(request):
-    with user({
-        "username": SMB_USER,
-        "full_name": SMB_USER,
-        "group_create": True,
-        "home": "/var/empty",
-        "password": SMB_PWD,
-    }) as u:
-        yield u
-
-
-def test_001_net_share_enum(setup_smb_user, setup_smb_share):
-    path = setup_smb_share['share']['path']
-    share_name = setup_smb_share['share']['name']
-
-    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
-        shares = hdl.shares()
-        # IPC$ share should always be present
-        assert len(shares) == 2, str(shares)
-        assert shares[0]['netname'] == 'IPC$'
-        assert shares[0]['path'] == 'C:\\tmp'
-        assert shares[1]['netname'] == share_name
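-        # The RPC reply uses a Windows-style path (e.g. C:\mnt\...), so flip the
-        # separators and drop the drive prefix before comparing with the local path.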
-        assert shares[1]['path'].replace('\\', '/')[2:] == path
-
-
-def test_002_enum_users(setup_smb_user, setup_smb_share):
-    payload = {
-        'query-filters': [['username', '=', SMB_USER]],
-        'query-options': {
-            'get': True,
-            'extra': {'additional_information': ['SMB']}
-        }
-    }
-    user_info = call('user.query', payload['query-filters'], payload['query-options'])
-    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
-        entry = None
-        users = hdl.users()
-        for u in users:
-            if u['user'] != SMB_USER:
-                continue
-
-            entry = u
-            break
-
-        assert entry is not None, str(users)
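-        # The RID is the last dash-separated component of the user's SID.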
-        rid = int(user_info['sid'].rsplit('-', 1)[1])
-        assert rid == entry['rid'], str(entry)
-
-
-def test_003_access_based_share_enum(setup_smb_user, setup_smb_share):
-    call('sharing.smb.setacl', {
-        'share_name': "RPC_TEST",
-        'share_acl': [
-            {
-                'ae_who_sid': 'S-1-5-32-544',
-                'ae_perm': 'FULL',
-                'ae_type': 'ALLOWED'
-            }
-        ]
-    })
-    results = call("sharing.smb.query")
-    with MS_RPC(username=SMB_USER, password=SMB_PWD) as hdl:
-        shares = hdl.shares()
-        assert len(shares) == 1, str({"enum": shares, "shares": results})
-
-
-def test_share_name_restrictions(setup_smb_share):
-    first_share = setup_smb_share['share']
-    ds_name = setup_smb_share['dataset']
-
-    for char in INVALID_SHARE_NAME_CHARACTERS:
-        # First try updating existing share's name
-        with pytest.raises(ValidationErrors) as ve:
-            call('sharing.smb.update', first_share['id'], {'name': f'CANARY{char}'})
-
-        assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg
-
-        # Now try creating new share
-        with pytest.raises(ValidationErrors) as ve:
-            call('sharing.smb.create', {'path': os.path.join('/mnt', ds_name), 'name': f'CANARY{char}'})
-
-        assert 'Share name contains the following invalid characters' in ve.value.errors[0].errmsg
-
-    with pytest.raises(ValidationErrors) as ve:
-        call('sharing.smb.update', first_share['id'], {'name': 'CANARY\x85'})
-
-    assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg
-
-    with pytest.raises(ValidationErrors) as ve:
-        call('sharing.smb.create', {'path': os.path.join('/mnt', ds_name), 'name': 'CANARY\x85'})
-
-    assert 'Share name contains unicode control characters' in ve.value.errors[0].errmsg
diff --git a/tests/api2/test_430_smb_sharesec.py b/tests/api2/test_430_smb_sharesec.py
deleted file mode 100644
index 22b7673402045..0000000000000
--- a/tests/api2/test_430_smb_sharesec.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import pytest
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from middlewared.test.integration.assets.account import user as create_user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, client, ssh
-from functions import SSH_TEST
-from auto_config import user, password
-
-Guests = {
-    "domain": "BUILTIN",
-    "name": "Guests",
-    "sidtype": "ALIAS"
-}
-Admins = {
-    "domain": "BUILTIN",
-    "name": "Administrators",
-    "sidtype": "ALIAS"
-}
-Users = {
-    "domain": "BUILTIN",
-    "name": "Users",
-    "sidtype": "ALIAS"
-}
-
-
-@pytest.fixture(scope="module")
-def setup_smb_share():
-    with dataset(
-        "smb-sharesec",
-        {'share_type': 'SMB'},
-    ) as ds:
-        with smb_share(f'/mnt/{ds}', "my_sharesec") as share:
-            yield share
-
-
-@pytest.fixture(scope="module")
-def sharesec_user():
-    with create_user({
-        'username': 'sharesec_user',
-        'full_name': 'sharesec_user',
-        'smb': True,
-        'group_create': True,
-        'password': 'test1234',
-    }) as u:
-        yield u
-
-
-def test_initialize_share(setup_smb_share):
-    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert len(acl['share_acl']) == 1
-    assert acl['share_acl'][0]['ae_who_sid'] == 'S-1-1-0'
-    assert acl['share_acl'][0]['ae_perm'] == 'FULL'
-    assert acl['share_acl'][0]['ae_type'] == 'ALLOWED'
-
-
-def test_set_smb_acl_by_sid(setup_smb_share):
-    payload = {
-        'share_name': setup_smb_share['name'],
-        'share_acl': [
-            {
-                'ae_who_sid': 'S-1-5-32-545',
-                'ae_perm': 'FULL',
-                'ae_type': 'ALLOWED'
-            }
-        ]
-    }
-    acl_set = call('sharing.smb.setacl', payload)
-
-    assert payload['share_name'].casefold() == acl_set['share_name'].casefold()
-    assert payload['share_acl'][0]['ae_who_sid'] == acl_set['share_acl'][0]['ae_who_sid']
-    assert payload['share_acl'][0]['ae_perm'] == acl_set['share_acl'][0]['ae_perm']
-    assert payload['share_acl'][0]['ae_type'] == acl_set['share_acl'][0]['ae_type']
-    assert acl_set['share_acl'][0]['ae_who_id']['id_type'] == 'GROUP'
-
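-    # The share ACL is persisted base64-encoded in the config database and
-    # should survive smb.sharesec.synchronize_acls unchanged.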
-    b64acl = call(
-        'datastore.query', 'sharing.cifs.share',
-        [['cifs_name', '=', setup_smb_share['name']]],
-        {'get': True}
-    )['cifs_share_acl']
-
-    assert b64acl != ""
-
-    call('smb.sharesec.synchronize_acls')
-
-    newb64acl = call(
-        'datastore.query', 'sharing.cifs.share',
-        [['cifs_name', '=', setup_smb_share['name']]],
-        {'get': True}
-    )['cifs_share_acl']
-
-    assert newb64acl == b64acl
-
-
-def test_set_smb_acl_by_unix_id(setup_smb_share, sharesec_user):
-    payload = {
-        'share_name': setup_smb_share['name'],
-        'share_acl': [
-            {
-                'ae_who_id': {'id_type': 'USER', 'id': sharesec_user['uid']},
-                'ae_perm': 'CHANGE',
-                'ae_type': 'ALLOWED'
-            }
-        ]
-    }
-    acl_set = call('sharing.smb.setacl', payload)
-
-    assert payload['share_name'].casefold() == acl_set['share_name'].casefold()
-    assert payload['share_acl'][0]['ae_perm'] == acl_set['share_acl'][0]['ae_perm']
-    assert payload['share_acl'][0]['ae_type'] == acl_set['share_acl'][0]['ae_type']
-    assert acl_set['share_acl'][0]['ae_who_id']['id_type'] == 'USER'
-    assert acl_set['share_acl'][0]['ae_who_id']['id'] == sharesec_user['uid']
-    assert acl_set['share_acl'][0]['ae_who_str'] == sharesec_user['username']
-
-
-def test_delete_share_info_tdb(setup_smb_share):
-    cmd = 'rm /var/db/system/samba4/share_info.tdb'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-
-    cmd = 'test -f /var/db/system/samba4/share_info.tdb'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is False, results['output']
-
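-    # With share_info.tdb removed, getacl should fall back to the default
-    # "everyone full control" ACL.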
-    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_sid'] == 'S-1-1-0'
-
-
-def test_restore_sharesec_with_flush_share_info(setup_smb_share, sharesec_user):
-    with client() as c:
-        c.call('smb.sharesec.flush_share_info')
-
-    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
-
-
-def test_verify_share_info_tdb_is_created(setup_smb_share, sharesec_user):
-    cmd = 'test -f /var/db/system/samba4/share_info.tdb'
-    results = SSH_TEST(cmd, user, password)
-    assert results['result'] is True, results['output']
-
-    # Get the initial ACL information
-    acl = call('sharing.smb.getacl', {'share_name': setup_smb_share['name']})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
-
-    share = call('sharing.smb.query', [['id', '=', setup_smb_share['id']]], {'get': True})
-    assert share['name'] == setup_smb_share['name']
-
-    share = call('sharing.smb.update', setup_smb_share['id'], {'name': 'my_sharesec2'})
-    assert share['name'] == 'my_sharesec2'
-
-    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
-
-    setup_smb_share['name'] = 'my_sharesec2'
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username'], ssh('tdbdump /var/db/system/samba4/share_info.tdb') 
-
-
-def test_toggle_share_and_verify_acl_preserved(setup_smb_share, sharesec_user):
-    call('sharing.smb.update', setup_smb_share['id'], {"enabled": False})
-
-    call('sharing.smb.update', setup_smb_share['id'], {"enabled": True})
-
-    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
-
-    # Abusive test, bypass normal APIs for share and
-    # verify that sync_registry call still preserves info.
-    call('datastore.update', 'sharing.cifs.share', setup_smb_share['id'], {'cifs_enabled': False})
-
-    call('sharing.smb.sync_registry', job=True)
-
-    call('datastore.update', 'sharing.cifs.share', setup_smb_share['id'], {'cifs_enabled': True})
-
-    call('sharing.smb.sync_registry', job=True)
-
-    acl = call('sharing.smb.getacl', {'share_name': 'my_sharesec2'})
-    assert acl['share_name'].casefold() == setup_smb_share['name'].casefold()
-    assert acl['share_acl'][0]['ae_who_str'] == sharesec_user['username']
diff --git a/tests/api2/test_435_smb_registry.py b/tests/api2/test_435_smb_registry.py
deleted file mode 100644
index bfe6db31184bc..0000000000000
--- a/tests/api2/test_435_smb_registry.py
+++ /dev/null
@@ -1,415 +0,0 @@
-import contextlib
-import os
-import pytest
-
-from middlewared.service_exception import ValidationError
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, ssh
-
-DATASET_NAME = 'smb-reg'
-SHARES = [f'REGISTRYTEST_{i}' for i in range(0, 5)]
-PRESETS = [
-    "DEFAULT_SHARE",
-    "ENHANCED_TIMEMACHINE",
-    "MULTI_PROTOCOL_NFS",
-    "PRIVATE_DATASETS",
-    "WORM_DROPBOX"
-]
-DETECTED_PRESETS = None
-
-"""
-Note: following sample auxiliary parameters and comments were
-provided by a community member for testing. They do not represent
-the opinion or recommendation of iXsystems.
-"""
-SAMPLE_AUX = [
-    'follow symlinks = yes ',
-    'veto files = /.windows/.mac/.zfs/',
-    '# needed explicitly for each share to prevent default being set',
-    'admin users = MY_ACCOUNT',
-    '## NOTES:', '',
-    "; aio-fork might cause smbd core dump/signal 6 in log in v11.1- see bug report [https://redmine.ixsystems.com/issues/27470]. Looks helpful but disabled until clear if it's responsible.", '', '',
-    '### VFS OBJECTS (shadow_copy2 not included if no periodic snaps, so do it manually)', '',
-    '# Include recycle, crossrename, and exclude readonly, as share=RW', '',
-    '#vfs objects = zfs_space zfsacl winmsa streams_xattr recycle shadow_copy2 crossrename aio_pthread', '',
-    'vfs objects = aio_pthread streams_xattr shadow_copy_zfs acl_xattr crossrename winmsa recycle', '',
-    '# testing without shadow_copy2', '',
-    'valid users = MY_ACCOUNT @ALLOWED_USERS',
-    'invalid users = root anonymous guest',
-    'hide dot files = yes',
-]
-
-SAMPLE_OPTIONS = [
-    'mangled names = no',
-    'dos charset = CP850',
-    'unix charset = UTF-8',
-    'strict sync = no',
-    '',
-    'min protocol = SMB2',
-    'vfs objects = fruit streams_xattr  ',
-    'fruit:model = MacSamba', 'fruit:posix_rename = yes ',
-    'fruit:veto_appledouble = no',
-    'fruit:wipe_intentionally_left_blank_rfork = yes ',
-    'fruit:delete_empty_adfiles = yes ',
-    '',
-    'fruit:locking=none',
-    'fruit:metadata=netatalk',
-    'fruit:resource=file',
-    'streams_xattr:prefix=user.',
-    'streams_xattr:store_stream_type=no',
-    'strict locking=auto',
-    '# oplocks=no  # breaks Time Machine',
-    ' level2 oplocks=no',
-    '# spotlight=yes  # invalid without further config'
-]
-
-
-@contextlib.contextmanager
-def create_smb_share(path, share_name, mkdir=False, options=None):
-    cr_opts = options or {}
-
-    if mkdir:
-        call('filesystem.mkdir', path)
-
-    with smb_share(path, share_name, cr_opts) as share:
-        yield share
-
-
-@contextlib.contextmanager
-def setup_smb_shares(mountpoint):
-    SHARE_DICT = {}
-
-    for share in SHARES:
-        share_path = os.path.join(mountpoint, share)
-        call('filesystem.mkdir', share_path)
-        new_share = call('sharing.smb.create', {
-            'comment': 'My Test SMB Share',
-            'name': share,
-            'home': False,
-            'path': share_path,
-        })
-        SHARE_DICT[share] = new_share['id']
-
-    try:
-        yield SHARE_DICT
-    finally:
-        for share_id in SHARE_DICT.values():
-            call('sharing.smb.delete', share_id)
-
-
-@pytest.fixture(scope='module')
-def setup_for_tests():
-    with dataset(DATASET_NAME, data={'share_type': 'SMB'}) as ds:
-        smb_registry_mp = os.path.join('/mnt', ds)
-        call('filesystem.setperm', {
-            'path': smb_registry_mp,
-            'mode': '777',
-            'options': {'stripacl': True, 'recursive': True}
-        }, job=True)
-
-        with setup_smb_shares(smb_registry_mp) as shares:
-            yield (smb_registry_mp, ds, shares)
-
-
-@pytest.fixture(scope='module')
-def share_presets():
-    yield call('sharing.smb.presets')
-
-
-def test__setup_for_tests(setup_for_tests):
-    reg_shares = call('sharing.smb.reg_listshares')
-    for share in SHARES:
-        assert share in reg_shares
-
-
-@pytest.mark.parametrize('smb_share', SHARES)
-def test__rename_shares(setup_for_tests, smb_share):
-    mp, ds, SHARE_DICT = setup_for_tests
-
-    call('sharing.smb.update', SHARE_DICT[smb_share], {
-        'name': f'NEW_{smb_share}'
-    })
-
-
-def test__renamed_shares_in_registry(setup_for_tests):
-    """
-    Share renames need to be explicitly tested because
-    a rename actually results in the share being removed from
-    the registry and re-added under a different name.
-    """
-    reg_shares = call('sharing.smb.reg_listshares')
-    for share in SHARES:
-        assert f'NEW_{share}' in reg_shares
-
-    assert len(reg_shares) == len(SHARES)
-
-
-def check_aux_param(param, share, expected, fruit_enable=False):
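-    # Ask the running SMB configuration (via smb.getparm) for the effective
-    # value of `param` on `share` and compare it against the expected value.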
-    val = call('smb.getparm', param, share)
-    if param == 'vfs objects':
-        expected_vfs_objects = expected.split()
-        # We have to override someone's poor life choices and insert
-        # vfs_fruit so that they don't have mysteriously broken time
-        # machine shares
-        if fruit_enable:
-            expected_vfs_objects.append('fruit')
-
-        assert set(expected_vfs_objects) == set(val)
-    else:
-        assert val == expected
-
-
-@pytest.mark.parametrize('preset', PRESETS)
-def test__test_presets(setup_for_tests, share_presets, preset):
-    """
-    This test iterates through SMB share presets,
-    applies them to a single share, and then validates
-    that the preset was applied correctly.
-
-    For boolean fields in the API, a simple check that the
-    appropriate value is set in the return from sharing.smb.update
-    is sufficient. For auxiliary parameters we need to be a bit
-    more thorough: the preset is not reflected in the returned
-    auxsmbconf, so we reach out directly and run smb.getparm.
-    """
-    mp, ds, SHARE_DICT = setup_for_tests
-    if 'TIMEMACHINE' in preset:
-        call('smb.update', {'aapl_extensions': True})
-
-    to_test = share_presets[preset]['params']
-    to_test_aux = to_test['auxsmbconf']
-    new_conf = call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], {
-        'purpose': preset
-    })
-    for entry in to_test_aux.splitlines():
-        aux, val = entry.split('=', 1)
-        check_aux_param(aux.strip(), new_conf['name'], val.strip())
-
-    for k in to_test.keys():
-        if k == "auxsmbconf":
-            continue
-        assert to_test[k] == new_conf[k]
-
-
-def test__reset_smb(setup_for_tests):
-    """
-    Remove all parameters that might turn us into
-    a MacOS-style SMB server (fruit).
-    """
-    mp, ds, SHARE_DICT = setup_for_tests
-    call('sharing.smb.update', SHARE_DICT['REGISTRYTEST_0'], {
-        "purpose": "NO_PRESET",
-        "timemachine": False
-    })
-    call('smb.update', {'aapl_extensions': False})
-
-
-def test__test_aux_param_on_update(setup_for_tests):
-    SHARE_DICT = setup_for_tests[2]
-    share_id = SHARE_DICT['REGISTRYTEST_0']
-    share = call('sharing.smb.query', [['id', '=', share_id]], {'get': True})
-
-    old_aux = share['auxsmbconf']
-    results = call('sharing.smb.update', share_id, {
-        'auxsmbconf': '\n'.join(SAMPLE_AUX)
-    })
-    new_aux = results['auxsmbconf']
-    new_name = results['name']
-    ncomments_sent = 0
-    ncomments_recv = 0
-
-    for entry in old_aux.splitlines():
-        """
-        Verify that aux params from last preset applied
-        are still in effect. Parameters included in
-        SAMPLE_AUX will never be in a preset so risk of
-        collision is minimal.
-        """
-        aux, val = entry.split('=', 1)
-        check_aux_param(aux.strip(), new_name, val.strip())
-
-    for entry in new_aux.splitlines():
-        """
-        Verify that non-comment parameters were successfully
-        applied to the running configuration.
-        """
-        if not entry:
-            continue
-
-        if entry.startswith(('#', ';')):
-            ncomments_recv += 1
-            continue
-
-        aux, val = entry.split('=', 1)
-        check_aux_param(aux.strip(), new_name, val.strip())
-
-    """
-    Verify comments aren't being stripped on update
-    """
-    for entry in SAMPLE_AUX:
-        if entry.startswith(('#', ';')):
-            ncomments_sent += 1
-
-    assert ncomments_sent == ncomments_recv, new_aux
-
-
-@contextlib.contextmanager
-def setup_aapl_extensions(newvalue):
-    oldvalue = call('smb.config')['aapl_extensions']
-    try:
-        if oldvalue != newvalue:
-            call('smb.update', {'aapl_extensions': newvalue})
-        yield
-    finally:
-        if oldvalue != newvalue:
-            call('smb.update', {'aapl_extensions': oldvalue})
-
-
-@pytest.fixture(scope='function')
-def setup_tm_share(setup_for_tests):
-    share_name = 'AUX_CREATE'
-    path = os.path.join(setup_for_tests[0], share_name)
-    with setup_aapl_extensions(True):
-        with create_smb_share(path, share_name, True, {
-            "home": False,
-            "purpose": "ENHANCED_TIMEMACHINE",
-            "auxsmbconf": '\n'.join(SAMPLE_AUX)
-        }) as s:
-            yield s
-
-
-def test__test_aux_param_on_create(share_presets, setup_tm_share):
-    share = setup_tm_share
-    new_aux = share['auxsmbconf']
-    pre_aux = share_presets["ENHANCED_TIMEMACHINE"]["params"]["auxsmbconf"]
-    ncomments_sent = 0
-    ncomments_recv = 0
-
-    for entry in pre_aux.splitlines():
-        """
-        Verify that aux params from preset were applied
-        successfully to the running configuration.
-        """
-        aux, val = entry.split('=', 1)
-        check_aux_param(aux.strip(), share['name'], val.strip())
-
-    for entry in new_aux.splitlines():
-        """
-        Verify that non-comment parameters were successfully
-        applied to the running configuration.
-        """
-        if not entry:
-            continue
-
-        if entry.startswith(('#', ';')):
-            ncomments_recv += 1
-            continue
-
-        aux, val = entry.split('=', 1)
-        check_aux_param(aux.strip(), share['name'], val.strip(), True)
-
-    """
-    Verify comments aren't being stripped on create
-    """
-    for entry in SAMPLE_AUX:
-        if entry.startswith(('#', ';')):
-            ncomments_sent += 1
-
-    assert ncomments_sent == ncomments_recv, f'new: {new_aux}, sample: {SAMPLE_AUX}'
-
-
-def test__delete_shares(setup_for_tests):
-    SHARE_DICT = setup_for_tests[2]
-    for key in list(SHARE_DICT.keys()):
-        call('sharing.smb.delete', SHARE_DICT[key])
-        SHARE_DICT.pop(key)
-
-    reg_shares = call('sharing.smb.reg_listshares')
-    assert len(reg_shares) == 0, str(reg_shares)
-
-    share_count = call('sharing.smb.query', [], {'count': True})
-    assert share_count == 0
-
-
-"""
-The following battery of tests validates the behavior of the
-registry with regard to homes shares.
-"""
-
-
-def test__create_homes_share(setup_for_tests):
-    mp, ds, share_dict = setup_for_tests
-    home_path = os.path.join(mp, 'HOME_SHARE')
-    call('filesystem.mkdir', home_path)
-
-    new_share = call('sharing.smb.create', {
-        "comment": "My Test SMB Share",
-        "path": home_path,
-        "home": True,
-        "purpose": "NO_PRESET",
-        "name": 'HOME_SHARE',
-    })
-    share_dict['HOME'] = new_share['id']
-
-    reg_shares = call('sharing.smb.reg_listshares')
-    assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-
-
-def test__toggle_homes_share(setup_for_tests):
-    mp, ds, share_dict = setup_for_tests
-    try:
-        call('sharing.smb.update', share_dict['HOME'], {'home': False})
-        reg_shares = call('sharing.smb.reg_listshares')
-        assert not any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-    finally:
-        call('sharing.smb.update', share_dict['HOME'], {'home': True})
-
-    reg_shares = call('sharing.smb.reg_listshares')
-    assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-
-
-def test__registry_rebuild_homes(setup_for_tests):
-    """
-    Abusive test.
-    In this test we run behind middleware's back and
-    delete our homes share from the registry, and then
-    attempt to rebuild it via the registry sync method. This
-    method is called (among other places) when the CIFS
-    service reloads.
-    """
-    ssh('net conf delshare HOMES')
-    call('service.reload', 'cifs')
-    reg_shares = call('sharing.smb.reg_listshares')
-    assert any(['homes'.casefold() == s.casefold() for s in reg_shares]), str(reg_shares)
-
-
-def test__test_smb_options():
-    """
-    Validate that user comments are preserved as-is
-    """
-    new_config = call('smb.update', {'smb_options': '\n'.join(SAMPLE_OPTIONS)})
-    assert new_config['smb_options'].splitlines() == SAMPLE_OPTIONS
-
-
-def test__test_invalid_share_aux_param_create(setup_for_tests):
-    init_share_count = call('sharing.smb.query', [], {'count': True})
-    with pytest.raises(ValidationError) as ve:
-        call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0], 'auxsmbconf': 'oplocks = canary'})
-
-    assert ve.value.attribute == 'sharingsmb_create.auxsmbconf'
-
-    assert init_share_count == call('sharing.smb.query', [], {'count': True})
-
-
-def test__test_invalid_share_aux_param_update(setup_for_tests):
-    this_share = call('sharing.smb.create', {'name': 'FAIL', 'path': setup_for_tests[0]})
-
-    try:
-        with pytest.raises(ValidationError) as ve:
-            call('sharing.smb.update', this_share['id'], {'auxsmbconf': 'oplocks = canary'})
-    finally:
-        call('sharing.smb.delete', this_share['id'])
-
-    assert ve.value.attribute == 'sharingsmb_update.auxsmbconf'
diff --git a/tests/api2/test_438_snapshots.py b/tests/api2/test_438_snapshots.py
deleted file mode 100644
index af016b72aa395..0000000000000
--- a/tests/api2/test_438_snapshots.py
+++ /dev/null
@@ -1,475 +0,0 @@
-from middlewared.test.integration.assets.pool import dataset, snapshot
-from auto_config import pool_name
-from middlewared.test.integration.utils import call
-
-
-def _verify_snapshot_keys_present(snap, expected, unexpected):
-    """
-    Verify that the snapshot returned by the query has the expected keys in its dict
-    and none of the unexpected ones.
-
-    :param snap: a dict containing snapshot data
-    :param expected: a list of strings, expected key names in the dict
-    :param unexpected: a list of strings, key names that should not be in the dict
-    """
-    assert set(expected).issubset(set(snap.keys())), f"Failed to get all expected keys: {snap.keys()}"
-    for key in unexpected:
-        assert key not in snap.keys(), f"Unexpectedly, was returned '{key}'"
-
-
-def _verify_snapshot_against_config(snap, dataset_id, snap_config):
-    """
-    Verify that the snapshot returned by the query has data that matches the data
-    returned when the dataset and snapshot were created.
-
-    :param snap: a dict containing snapshot data
-    :param dataset_id: dataset name
-    :param snap_config: a dict containing the snapshot data (when it was created)
-    """
-    assert snap['pool'] == dataset_id.split('/')[0], f"Incorrect pool: {snap}"
-    assert snap['name'] == snap_config['name'], f"Incorrect name: {snap}"
-    assert snap['type'] == "SNAPSHOT", f"Incorrect type: {snap}"
-    assert snap['snapshot_name'] == snap_config['snapshot_name'], f"Incorrect snapshot_name: {snap}"
-    assert snap['dataset'] == dataset_id, f"Incorrect dataset: {snap}"
-    assert snap['id'] == snap_config['id'], f"Incorrect id: {snap}"
-    assert isinstance(snap['createtxg'], str), f"Incorrect type for createtxg: {snap}"
-    assert snap['createtxg'] == snap_config['createtxg'], f"Incorrect createtxg: {snap}"
-
-
-def _verify_snapshot_properties(snap, properties_list):
-    """
-    Verify that the snapshot returned by the query has the expected items in its
-    'properties' value.
-
-    In the case of 'name' and 'createtxg' properties we perform additional checks
-    as this data should be present twice in snap.
-
-    :param snap: a dict containing snapshot data
-    :param properties_list: a list of strings, key names of properties that should
-    be present in snap['properties']
-    """
-    for prop in properties_list:
-        assert prop in snap['properties'], f"Missing property: {prop}"
-    # Special checking if name requested
-    if 'name' in properties_list:
-        assert snap['properties']['name']['value'] == snap['name'], f"Name property does not match {snap['properties']['name']}"
-    if 'createtxg' in properties_list:
-        assert snap['properties']['createtxg']['value'] == snap['createtxg'], f"createtxg property does not match {snap['properties']['createtxg']}"
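Taken together, the assertions above imply a returned snapshot entry shaped roughly like the following when 'properties' is requested; values are illustrative and only the fields these helpers actually touch are shown:

    snap = {
        'pool': 'tank',
        'name': 'tank/ds@snap01',
        'type': 'SNAPSHOT',
        'snapshot_name': 'snap01',
        'dataset': 'tank/ds',
        'id': 'tank/ds@snap01',
        'createtxg': '12345',
        'properties': {
            'name': {'value': 'tank/ds@snap01'},
            'createtxg': {'value': '12345'},
        },
    }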
-
-#
-# Snapshot query: filter by dataset name
-#
-
-def _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list,
-        expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'],
-        unexpected_keys = ['properties']):
-    """
-    Perform snapshot queries, filtered by dataset name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    :param expected_keys: a list of strings, the key names expected to be present in the snapshot dict
-    :param unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict
-    """
-    with dataset(dataset_name) as dataset_id:
-        with snapshot(dataset_id, "snap01", get=True) as snap01_config:
-            payload = {
-                'query-filters': [["dataset", "=", dataset_id]],
-                'query-options': {
-                    'extra': {
-                        'properties': properties_list
-                    }
-                }
-            }
-            snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-            # Check that we have one snap returned and that it has the expected
-            # data
-            assert len(snaps) == 1
-            snap = snaps[0]
-            _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys)
-            _verify_snapshot_against_config(snap, dataset_id, snap01_config)
-            if 'properties' not in unexpected_keys:
-                _verify_snapshot_properties(snap, properties_list)
-
-            # Now create another snapshot and re-issue the query to check the
-            # new results.
-            with snapshot(dataset_id, "snap02", get=True) as snap02_config:
-                snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                # Check that we have two snaps returned and that they have the expected
-                # data.
-                assert len(snaps) == 2
-
-                # Need to sort the snaps by createtxg
-                ssnaps = sorted(snaps, key=lambda d: int(d['createtxg']))
-                snap01 = ssnaps[0]
-                snap02 = ssnaps[1]
-                _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap01, dataset_id, snap01_config)
-                _verify_snapshot_keys_present(snap02, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap02, dataset_id, snap02_config)
-                if 'properties' not in unexpected_keys:
-                    _verify_snapshot_properties(snap01, properties_list)
-                    _verify_snapshot_properties(snap02, properties_list)
-
-                existing_snaps = {snap01['createtxg'], snap02['createtxg']}
-
-                # Now create *another* dataset and snapshot and ensure we
-                # only see the snapshots we're supposed to.
-                with dataset(f"{dataset_name}2") as dataset2:
-                    with snapshot(dataset2, "snap03", get=True) as snap03_config:
-                        # First issue the original query again & ensure we still have
-                        # the expected snapshots
-                        snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                        assert len(snaps) == 2
-                        for snap in snaps:
-                            assert snap['createtxg'] in existing_snaps, f"Got unexpected snap: {snap}"
-
-                        # Next issue the query with a different filter
-                        payload.update({
-                            'query-filters': [["dataset", "=", dataset2]]
-                            })
-                        snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                        assert len(snaps) == 1
-                        snap = snaps[0]
-                        assert snap['createtxg'] not in existing_snaps, f"Got unexpected snap: {snap}"
-                        new_snaps = {snap['createtxg']}
-                        _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys)
-                        _verify_snapshot_against_config(snap, dataset2, snap03_config)
-                        if 'properties' not in unexpected_keys:
-                            _verify_snapshot_properties(snap, properties_list)
-
-                        # Next issue the query with a bogus filter
-                        payload.update({
-                            'query-filters': [["dataset", "=", f"{dataset_name}-BOGUS"]]
-                            })
-                        snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                        assert len(snaps) == 0
-
-                        # Next issue the query WITHOUT a filter.  It's possible
-                        # that this test could be run while other snapshots are
-                        # present, so take that into account during checks, e.g.
-                        # assert count >= 3 rather than == 3
-                        payload.update({
-                            'query-filters': []
-                            })
-                        snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                        assert len(snaps) >= 3
-                        all_snaps = set([s['createtxg'] for s in snaps])
-                        assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query"
-                        assert new_snaps.issubset(all_snaps), "New snaps not returned in filterless query"
-
-                    # Let snap03 get cleaned up, and then ensure that even with a filterless query
-                    # that it is no longer returned.
-                    snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                    assert len(snaps) >= 2
-                    all_snaps = set([s['createtxg'] for s in snaps])
-                    assert existing_snaps.issubset(all_snaps), "Existing snaps not returned in filterless query"
-                    assert not new_snaps.issubset(all_snaps), "New snaps returned in filterless query"
-
-
-def _test_simple_snapshot_query_filter_dataset(dataset_name, properties_list):
-    """
-    Perform simple snapshot queries, filtered by dataset name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list,
-        expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'],
-        unexpected_keys = ['properties'])
-
-
-def _test_full_snapshot_query_filter_dataset(dataset_name, properties_list):
-    """
-    Perform non-simple (non fast-path) snapshot queries, filtered by dataset name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_dataset(dataset_name, properties_list,
-        ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'],
-        [])
-
-
-def test_01_snapshot_query_filter_dataset_props_name(request):
-    """
-    Test snapshot query, filtered by dataset with properties option: 'name'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-name", ['name'])
-
-
-def test_02_snapshot_query_filter_dataset_props_createtxg(request):
-    """
-    Test snapshot query, filtered by dataset with properties option: 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['createtxg'])
-
-
-def test_03_snapshot_query_filter_dataset_props_name_createtxg(request):
-    """
-    Test snapshot query, filtered by dataset with properties option: 'name', 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg'])
-    _test_simple_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name'])
-
-
-def test_04_snapshot_query_filter_dataset_props_used(request):
-    """
-    Test snapshot query, filtered by dataset including properties option: 'used'
-
-    The results should be regular (NON fast-path) query that returns 'properties'.
-    """
-    _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used'])
-    _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'name'])
-    _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg'])
-    _test_full_snapshot_query_filter_dataset("ds-snapshot-simple-query-createtxg", ['used', 'createtxg'])
-
-
-#
-# Snapshot query: filter by snapshot name
-#
-
-def _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list, expected_keys, unexpected_keys):
-    """
-    Perform snapshot queries, filtered by snapshot name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    :param expected_keys: a list of strings, the key names expected to be present in the snapshot dict
-    :param unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict
-    """
-    with dataset(dataset_name) as dataset_id:
-        with snapshot(dataset_id, "snap01", get=True) as snap01_config:
-            with snapshot(dataset_id, "snap02", get=True) as snap02_config:
-                # Query snap01
-                payload = {
-                    'query-filters': [['name', '=', snap01_config['name']]],
-                    'query-options': {
-                        'extra': {
-                            'properties': properties_list
-                        }
-                    }
-                }
-                snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                # Check that we have one snap returned and that it has the expected
-                # data
-                assert len(snaps) == 1
-                snap = snaps[0]
-                _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap, dataset_id, snap01_config)
-                if 'properties' not in unexpected_keys:
-                    _verify_snapshot_properties(snap, properties_list)
-
-                # Query snap02
-                payload = {
-                    'query-filters': [['name', '=', snap02_config['name']]],
-                    'query-options': {
-                        'extra': {
-                            'properties': properties_list
-                        }
-                    }
-                }
-                snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-                # Check that we have one snap returned and that it has the expected
-                # data
-                assert len(snaps) == 1
-                snap = snaps[0]
-                _verify_snapshot_keys_present(snap, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap, dataset_id, snap02_config)
-                if 'properties' not in unexpected_keys:
-                    _verify_snapshot_properties(snap, properties_list)
-
-            # Allow snap02 to be destroyed, then query again to make sure we don't get it
-            snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-            assert len(snaps) == 0
-
-
-def _test_simple_snapshot_query_filter_snapshot(dataset_name, properties_list):
-    """
-    Perform simple snapshot queries, filtered by snapshot name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list,
-        expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'],
-        unexpected_keys = ['properties'])
-
-
-def _test_full_snapshot_query_filter_snapshot(dataset_name, properties_list):
-    """
-    Perform non-simple (non fast-path) snapshot queries, filtered by snapshot name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_snapshot(dataset_name, properties_list,
-        ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'],
-        [])
-
-
-def test_05_snapshot_query_filter_snapshot_props_name(request):
-    """
-    Test snapshot query, filtered by snapshot with properties option: 'name'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-name", ['name'])
-
-
-def test_06_snapshot_query_filter_snapshot_props_createtxg(request):
-    """
-    Test snapshot query, filtered by snapshot with properties option: 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['createtxg'])
-
-
-def test_07_snapshot_query_filter_snapshot_props_name_createtxg(request):
-    """
-    Test snapshot query, filtered by snapshot with properties option: 'name', 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg'])
-    _test_simple_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name'])
-
-
-def test_08_snapshot_query_filter_snapshot_props_used(request):
-    """
-    Test snapshot query, filtered by snapshot including properties option: 'used'
-
-    The results should be regular (NON fast-path) query that returns 'properties'.
-    """
-    _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used'])
-    _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'name'])
-    _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg'])
-    _test_full_snapshot_query_filter_snapshot("ds-snapshot-simple-query-createtxg", ['used', 'createtxg'])
-
-
-#
-# Snapshot query: filter by pool name
-#
-
-def _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list, expected_keys, unexpected_keys):
-    """
-    Perform snapshot queries, filtered by pool name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    :param expected_keys: a list of strings, the key names expected to be present in the snapshot dict
-    :param unexpected_keys: a list of strings, the key names expected NOT to be present in the snapshot dict
-    """
-    with dataset(dataset_name) as dataset_id:
-        # Before we create any snapshots for this test, query snapshots
-        payload = {
-            'query-filters': [['pool', '=', pool_name]],
-            'query-options': {
-                'extra': {
-                    'properties': properties_list
-                }
-            }
-        }
-        snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-        original_snap_count = len(snaps)
-
-        with snapshot(dataset_id, "snap01", get=True) as snap01_config:
-            with snapshot(dataset_id, "snap02", get=True) as snap02_config:
-                # Query again
-                snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-
-                # Check that we have two additional snaps returned and that
-                # they have the expected data
-                assert len(snaps) == original_snap_count+2
-                ssnaps = sorted(snaps, key=lambda d: int(d['createtxg']))
-                snap01 = ssnaps[-2]
-                snap02 = ssnaps[-1]
-                _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap01, dataset_id, snap01_config)
-                _verify_snapshot_keys_present(snap02, expected_keys, unexpected_keys)
-                _verify_snapshot_against_config(snap02, dataset_id, snap02_config)
-                if 'properties' not in unexpected_keys:
-                    _verify_snapshot_properties(snap01, properties_list)
-                    _verify_snapshot_properties(snap02, properties_list)
-
-            # Allow snap02 to be destroyed & query again.
-            snaps = call("zfs.snapshot.query", payload["query-filters"], payload["query-options"])
-
-            assert len(snaps) == original_snap_count+1
-            ssnaps = sorted(snaps, key=lambda d: int(d['createtxg']))
-            snap01 = ssnaps[-1]
-            _verify_snapshot_keys_present(snap01, expected_keys, unexpected_keys)
-            _verify_snapshot_against_config(snap01, dataset_id, snap01_config)
-            if 'properties' not in unexpected_keys:
-                _verify_snapshot_properties(snap01, properties_list)
-
-
-def _test_simple_snapshot_query_filter_pool(dataset_name, properties_list):
-    """
-    Perform simple snapshot queries, filtered by pool name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list,
-        expected_keys = ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg'],
-        unexpected_keys = ['properties'])
-
-
-def _test_full_snapshot_query_filter_pool(dataset_name, properties_list):
-    """
-    Perform non-simple (non fast-path) snapshot queries, filtered by pool name.
-
-    :param dataset_name: a string, the name of the dataset to be created and used in queries.
-    :param properties_list: a list of strings, the names to be queried in snapshot properties option
-    """
-    _test_xxx_snapshot_query_filter_pool(dataset_name, properties_list,
-        ['pool', 'name', 'type', 'snapshot_name', 'dataset', 'id', 'createtxg', 'properties'],
-        [])
-
-
-def test_09_snapshot_query_filter_pool_props_name(request):
-    """
-    Test snapshot query, filtered by pool with properties option: 'name'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-name", ['name'])
-
-
-def test_10_snapshot_query_filter_pool_props_createtxg(request):
-    """
-    Test snapshot query, filtered by pool with properties option: 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['createtxg'])
-
-
-def test_11_snapshot_query_filter_pool_props_name_createtxg(request):
-    """
-    Test snapshot query, filtered by pool with properties option: 'name', 'createtxg'
-
-    The results should be simple (fast-path) without 'properties'.
-    """
-    _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-name-createtxg", ['name', 'createtxg'])
-    _test_simple_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg-name", ['createtxg', 'name'])
-
-
-def test_12_snapshot_query_filter_pool_props_used(request):
-    """
-    Test snapshot query, filtered by pool including properties option: 'used'
-
-    The results should be regular (NON fast-path) query that returns 'properties'.
-    """
-    _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used'])
-    _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'name'])
-    _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'name', 'createtxg'])
-    _test_full_snapshot_query_filter_pool("ds-snapshot-simple-query-createtxg", ['used', 'createtxg'])
diff --git a/tests/api2/test_440_snmp.py b/tests/api2/test_440_snmp.py
deleted file mode 100644
index 9e3a36feb73ea..0000000000000
--- a/tests/api2/test_440_snmp.py
+++ /dev/null
@@ -1,445 +0,0 @@
-#!/usr/bin/env python3
-# License: BSD
-
-import os
-import pytest
-
-from time import sleep
-
-from contextlib import ExitStack
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.pool import dataset, snapshot
-from middlewared.test.integration.assets.filesystem import directory, mkfile
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.system import reset_systemd_svcs
-from pysnmp.hlapi import (CommunityData, ContextData, ObjectIdentity,
-                          ObjectType, SnmpEngine, UdpTransportTarget, getCmd)
-
-
-from auto_config import ha, interface, password, user, pool_name
-from functions import async_SSH_done, async_SSH_start
-
-skip_ha_tests = pytest.mark.skipif(not (ha and "virtual_ip" in os.environ), reason="Skip HA tests")
-COMMUNITY = 'public'
-TRAPS = False
-CONTACT = 'root@localhost.com'
-LOCATION = 'Maryville, TN'
-PASSWORD = 'testing1234'
-SNMP_USER_NAME = 'snmpJoe'
-SNMP_USER_AUTH = 'MD5'
-SNMP_USER_PWD = "abcd1234"
-SNMP_USER_PRIV = 'AES'
-SNMP_USER_PHRS = "A priv pass phrase"
-SNMP_USER_CONFIG = {
-    "v3": True,
-    "v3_username": SNMP_USER_NAME,
-    "v3_authtype": SNMP_USER_AUTH,
-    "v3_password": SNMP_USER_PWD,
-    "v3_privproto": SNMP_USER_PRIV,
-    "v3_privpassphrase": SNMP_USER_PHRS
-}
-
-
-EXPECTED_DEFAULT_CONFIG = {
-    "location": "",
-    "contact": "",
-    "traps": False,
-    "v3": False,
-    "community": "public",
-    "v3_username": "",
-    "v3_authtype": "SHA",
-    "v3_password": "",
-    "v3_privproto": None,
-    "v3_privpassphrase": None,
-    "options": "",
-    "loglevel": 3,
-    "zilstat": False
-}
-
-EXPECTED_DEFAULT_STATE = {
-    "enable": False,
-    "state": "STOPPED",
-}
-
-CMD_STATE = {
-    "RUNNING": "start",
-    "STOPPED": "stop"
-}
-
-
-# =====================================================================
-#                     Fixtures and utilities
-# =====================================================================
-@pytest.fixture(scope='module')
-def initialize_and_start_snmp():
-    """ Initialize and start SNMP """
-    try:
-        # Get initial config and start SNMP
-        orig_config = call('snmp.config')
-        call('service.start', 'snmp')
-        yield orig_config
-    finally:
-        # Restore default config (which will also delete any created user),
-        # stop SNMP and restore default enable state
-        call('snmp.update', EXPECTED_DEFAULT_CONFIG)
-        call(f'service.{CMD_STATE[EXPECTED_DEFAULT_STATE["state"]]}', 'snmp')
-        call('service.update', 'snmp', {"enable": EXPECTED_DEFAULT_STATE['enable']})
-
-
-@pytest.fixture(scope='class')
-def add_SNMPv3_user():
-    # Reset the systemd restart counter
-    reset_systemd_svcs("snmpd snmp-agent")
-
-    call('snmp.update', SNMP_USER_CONFIG)
-    assert get_systemctl_status('snmp-agent') == "RUNNING"
-
-    res = call('snmp.get_snmp_users')
-    assert SNMP_USER_NAME in res
-    yield
-
-
-@pytest.fixture(scope='function')
-def create_nested_structure():
-    """
-    Create the following structure:
-        tank -+-> dataset_1 -+-> dataset_2 -+-> dataset_3
-              |-> zvol_1a    |-> zvol-L_2a  |-> zvol L_3a
-              |-> zvol_1b    |-> zvol-L_2b  |-> zvol L_3b
-              |-> file_1     |-> file_2     |-> file_3
-              |-> dir_1      |-> dir_2      |-> dir_3
-    TODO: Make this generic and move to assets
-    """
-    ds_path = ""
-    ds_list = []
-    zv_list = []
-    dir_list = []
-    file_list = []
-    # Test '-' and ' ' in the name (we skip index 0)
-    zvol_name = ["bogus", "zvol", "zvol-L", "zvol L"]
-    with ExitStack() as es:
-
-        for i in range(1, 4):
-            preamble = f"{ds_path + '/' if i > 1 else ''}"
-            vol_path = f"{preamble}{zvol_name[i]}_{i}"
-
-            # Create zvols
-            for c in crange('a', 'b'):
-                zv = es.enter_context(dataset(vol_path + c, {"type": "VOLUME", "volsize": 1048576}))
-                zv_list.append(zv)
-
-            # Create directories
-            d = es.enter_context(directory(f"/mnt/{pool_name}/{preamble}dir_{i}"))
-            dir_list.append(d)
-
-            # Create files
-            f = es.enter_context(mkfile(f"/mnt/{pool_name}/{preamble}file_{i}", 1048576))
-            file_list.append(f)
-
-            # Create datasets
-            ds_path += f"{'/' if i > 1 else ''}dataset_{i}"
-            ds = es.enter_context(dataset(ds_path))
-            ds_list.append(ds)
-
-        yield {'zv': zv_list, 'ds': ds_list, 'dir': dir_list, 'file': file_list}
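The fixture leans on contextlib.ExitStack so that an arbitrary number of context-managed assets (datasets, zvols, directories, files) share a single cleanup point. A self-contained sketch of that pattern with plain context managers:

    from contextlib import ExitStack, contextmanager

    @contextmanager
    def resource(name):
        # Stand-in for dataset()/directory()/mkfile() style assets.
        print(f'create {name}')
        try:
            yield name
        finally:
            print(f'destroy {name}')

    with ExitStack() as es:
        created = [es.enter_context(resource(f'r{i}')) for i in range(3)]
        # All three resources are live here; they are torn down in
        # reverse order when the with-block exits.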
-
-
-def crange(c1, c2):
-    """
-    Generates the characters from `c1` to `c2`, inclusive.
-    Simple lowercase ASCII only.
-    NOTE: test helper only; not hardened for production use.
-    """
-    ord_a = 97
-    ord_z = 122
-    c1_ord = ord(c1)
-    c2_ord = ord(c2)
-    assert c1_ord < c2_ord, f"'{c1}' must be 'less than' '{c2}'"
-    assert ord_a <= c1_ord <= ord_z
-    assert ord_a <= c2_ord <= ord_z
-    for c in range(c1_ord, c2_ord + 1):
-        yield chr(c)
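A quick usage example (illustrative only); the fixture's own call is crange('a', 'b'):

    assert list(crange('a', 'd')) == ['a', 'b', 'c', 'd']
    assert list(crange('a', 'b')) == ['a', 'b']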
-
-
-def get_systemctl_status(service):
-    """ Return 'RUNNING' or 'STOPPED' """
-    try:
-        res = ssh(f'systemctl status {service}')
-    except AssertionError:
-        # Return code is non-zero if service is not running
-        return "STOPPED"
-
-    action = [line for line in res.splitlines() if line.lstrip().startswith('Active')]
-    return "RUNNING" if action[0].split()[2] == "(running)" else "STOPPED"
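For reference, the 'Active' line this parses typically looks like the sample below (timestamp illustrative), so split()[2] picks out the parenthesised run state:

    line = '     Active: active (running) since Mon 2024-01-01 00:00:00 UTC; 1h ago'
    assert line.split()[2] == '(running)'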
-
-
-def get_sysname(hostip, community):
-    iterator = getCmd(SnmpEngine(),
-                      CommunityData(community),
-                      UdpTransportTarget((hostip, 161)),
-                      ContextData(),
-                      ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysName', 0)))
-    errorIndication, errorStatus, errorIndex, varBinds = next(iterator)
-    assert errorIndication is None, errorIndication
-    assert errorStatus == 0, errorStatus
-    value = str(varBinds[0])
-    _prefix = "SNMPv2-MIB::sysName.0 = "
-    assert value.startswith(_prefix), value
-    return value[len(_prefix):]
-
-
-def validate_snmp_get_sysname_uses_same_ip(hostip):
-    """Test that when we query a particular interface by SNMP the response comes from the same IP."""
-
-    # Write the test in a manner that is portable between Linux and FreeBSD ... which means
-    # *not* using 'any' as the interface name.  We will use the interface supplied by the
-    # test runner instead.
-    print(f"Testing {hostip} ", end='')
-    p = async_SSH_start(f"tcpdump -t -i {interface} -n udp port 161 -c2", user, password, hostip)
-    # Give some time so that the tcpdump has started before we proceed
-    sleep(5)
-
-    get_sysname(hostip, COMMUNITY)
-
-    # Now collect and process the tcpdump output
-    outs, errs = async_SSH_done(p, 20)
-    output = outs.strip()
-    assert len(output), f"No output from tcpdump:{outs}"
-    lines = output.split("\n")
-    assert len(lines) == 2, f"Unexpected number of lines output by tcpdump: {outs}"
-    for line in lines:
-        assert line.split()[0] == 'IP'
-    # print(errs)
-    get_dst = lines[0].split()[3].rstrip(':')
-    reply_src = lines[1].split()[1]
-    assert get_dst == reply_src
-    assert get_dst.endswith(".161")
-
-
-def user_list_users(snmp_config):
-    """Run an snmpwalk as a SNMP v3 user"""
-
-    add_cmd = None
-    if snmp_config['v3_privproto']:
-        authpriv_setting = 'authPriv'
-        add_cmd = f"-x {snmp_config['v3_privproto']} -X \"{snmp_config['v3_privpassphrase']}\" "
-    else:
-        authpriv_setting = 'authNoPriv'
-
-    cmd = f"snmpwalk -v3 -u  {snmp_config['v3_username']} -l {authpriv_setting} "
-    cmd += f"-a {snmp_config['v3_authtype']} -A {snmp_config['v3_password']} "
-    if add_cmd:
-        cmd += add_cmd
-    cmd += "localhost iso.3.6.1.6.3.15.1.2.2.1.3"
-
-    # This call will timeout if SNMP is not running
-    res = ssh(cmd)
-    return [x.split(':')[-1].strip(' \"') for x in res.splitlines()]
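With SNMP_USER_CONFIG as defined above (a priv protocol is set), the assembled command comes out roughly as follows; the doubled space after -u is produced by the f-string as written:

    snmpwalk -v3 -u  snmpJoe -l authPriv -a MD5 -A abcd1234 -x AES -X "A priv pass phrase" localhost iso.3.6.1.6.3.15.1.2.2.1.3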
-
-
-def v2c_snmpwalk(mib):
-    """
-    Run snmpwalk with v2c protocol
-    mib is the item to be gathered.  mib format examples:
-        iso.3.6.1.6.3.15.1.2.2.1.3
-        1.3.6.1.4.1.50536.1.2
-    """
-    cmd = f"snmpwalk -v2c -cpublic localhost {mib}"
-
-    # This call will timeout if SNMP is not running
-    res = ssh(cmd)
-    return [x.split(':')[-1].strip(' \"') for x in res.splitlines()]
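As an illustration of the parsing above, a typical snmpwalk output line (content illustrative) reduces to just the quoted value:

    line = 'iso.3.6.1.4.1.50536.1.2.1.1.2.1 = STRING: "tank/zvol_1a"'
    assert line.split(':')[-1].strip(' "') == 'tank/zvol_1a'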
-
-
-# =====================================================================
-#                           Tests
-# =====================================================================
-class TestSNMP:
-
-    def test_configure_SNMP(self, initialize_and_start_snmp):
-        config = initialize_and_start_snmp
-
-        # We should be starting with the default config
-        # Check the hard way so that we can identify the culprit
-        for k, v in EXPECTED_DEFAULT_CONFIG.items():
-            assert config.get(k) == v, f'Expected {k}:"{v}", but found {k}:"{config.get(k)}"'
-
-        # Make some changes that will be checked in a later test
-        call('snmp.update', {
-            'community': COMMUNITY,
-            'traps': TRAPS,
-            'contact': CONTACT,
-            'location': LOCATION
-        })
-
-    def test_enable_SNMP_service_at_boot(self):
-        id = call('service.update', 'snmp', {'enable': True})
-        assert isinstance(id, int)
-
-        res = call('service.query', [['service', '=', 'snmp']])
-        assert res[0]['enable'] is True
-
-    def test_SNMP_service_is_running(self):
-        res = call('service.query', [['service', '=', 'snmp']])
-        assert res[0]['state'] == 'RUNNING'
-
-    def test_SNMP_settings_are_preserved(self):
-        data = call('snmp.config')
-        assert data['community'] == COMMUNITY
-        assert data['traps'] == TRAPS
-        assert data['contact'] == CONTACT
-        assert data['location'] == LOCATION
-
-    def test_sysname_reply_uses_same_ip(self):
-        validate_snmp_get_sysname_uses_same_ip(truenas_server.ip)
-
-    @skip_ha_tests
-    def test_ha_sysname_reply_uses_same_ip(self):
-        validate_snmp_get_sysname_uses_same_ip(truenas_server.ip)
-        validate_snmp_get_sysname_uses_same_ip(truenas_server.nodea_ip)
-        validate_snmp_get_sysname_uses_same_ip(truenas_server.nodeb_ip)
-
-    def test_SNMPv3_private_user(self):
-        """
-        The SNMP system user should always be available
-        """
-        # Reset the systemd restart counter
-        reset_systemd_svcs("snmpd snmp-agent")
-
-        # Make sure the createUser command is not present
-        res = ssh("tail -2 /var/lib/snmp/snmpd.conf")
-        assert 'createUser' not in res
-
-        # Make sure the SNMP system user is a rwuser
-        res = ssh("cat /etc/snmp/snmpd.conf")
-        assert "rwuser snmpSystemUser" in res
-
-        # List the SNMP users and confirm the system user
-        # This also confirms the functionality of the system user
-        res = call('snmp.get_snmp_users')
-        assert "snmpSystemUser" in res
-
-    @pytest.mark.parametrize('payload,attrib,errmsg', [
-        ({'v3': False, 'community': ''},
-            'snmp_update.community', 'This field is required when SNMPv3 is disabled'),
-        ({'v3': True},
-            'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'),
-        ({'v3_authtype': 'AES'},
-            'snmp_update.v3_authtype', 'Input should be'),
-        ({'v3': True, 'v3_authtype': 'MD5'},
-            'snmp_update.v3_username', 'This field is required when SNMPv3 is enabled'),
-        ({'v3_password': 'short'},
-            'snmp_update.v3_password', 'Password must contain at least 8 characters'),
-        ({'v3_privproto': 'SHA'},
-            'snmp_update.v3_privproto', 'Input should be'),
-        ({'v3_privproto': 'AES'},
-            'snmp_update.v3_privpassphrase', 'This field is required when SNMPv3 private protocol is specified'),
-    ])
-    def test_v3_validators(self, payload, attrib, errmsg):
-        """
-        All these configuration updates should fail.
-        """
-        with pytest.raises(ValidationErrors) as ve:
-            call('snmp.update', payload)
-        if attrib:
-            assert f"{attrib}" in ve.value.errors[0].attribute
-        if errmsg:
-            assert f"{errmsg}" in ve.value.errors[0].errmsg
-
-    @pytest.mark.usefixtures("add_SNMPv3_user")
-    class TestSNMPv3User:
-        def test_SNMPv3_user_function(self):
-            res = user_list_users(SNMP_USER_CONFIG)
-            assert SNMP_USER_NAME in res, f"Expected to find {SNMP_USER_NAME} in {res}"
-
-        def test_SNMPv3_user_retained_across_service_restart(self):
-            # Reset the systemd restart counter
-            reset_systemd_svcs("snmpd snmp-agent")
-
-            res = call('service.stop', 'snmp')
-            assert res is True
-            res = call('service.start', 'snmp')
-            assert res is True
-            res = call('snmp.get_snmp_users')
-            assert "snmpSystemUser" in res
-            assert SNMP_USER_NAME in res
-
-        def test_SNMPv3_user_retained_across_v3_disable(self):
-
-            # Disable and check
-            res = call('snmp.update', {'v3': False})
-            assert SNMP_USER_NAME in res['v3_username']
-            res = call('snmp.get_snmp_users')
-            assert SNMP_USER_NAME in res
-
-            # Enable and check
-            res = call('snmp.update', {'v3': True})
-            assert SNMP_USER_NAME in res['v3_username']
-            res = call('snmp.get_snmp_users')
-            assert SNMP_USER_NAME in res
-
-        @pytest.mark.parametrize('key,value', [
-            ('reset', ''),  # Reset systemd counters
-            ('v3_username', 'ixUser'),
-            ('v3_authtype', 'SHA'),
-            ('v3_password', 'SimplePassword'),
-            ('reset', ''),  # Reset systemd counters
-            ('v3_privproto', 'DES'),
-            ('v3_privpassphrase', 'Pass phrase with spaces'),
-            # Restore original user name
-            ('v3_username', SNMP_USER_NAME)
-        ])
-        def test_SNMPv3_user_changes(self, key, value):
-            """
-            Make changes to the SNMPv3 user name, password, etc. and confirm the user still functions.
-            This also tests a pass phrase that includes spaces.
-            NOTE: We include systemd counter resets because these calls require the most restarts.
-            """
-            if key == 'reset':
-                # Reset the systemd restart counter
-                reset_systemd_svcs("snmpd snmp-agent")
-            else:
-                res = call('snmp.update', {key: value})
-                assert value in res[key]
-                assert get_systemctl_status('snmp-agent') == "RUNNING"
-
-                # Confirm user function after change
-                user_config = call('snmp.config')
-                res = user_list_users(user_config)
-                assert user_config['v3_username'] in res
-
-        def test_SNMPv3_user_delete(self):
-
-            # Make sure the user is currently present
-            res = call('snmp.get_snmp_users')
-            assert SNMP_USER_NAME in res
-
-            res = call('snmp.update', {'v3': False, 'v3_username': ''})
-            # v3_authtype is defaulted to 'SHA' in the DB
-            assert not any([res['v3'], res['v3_username'], res['v3_password'],
-                            res['v3_privproto'], res['v3_privpassphrase']]) and 'SHA' in res['v3_authtype']
-            assert get_systemctl_status('snmp-agent') == "RUNNING"
-
-            res = call('snmp.get_snmp_users')
-            assert SNMP_USER_NAME not in res
-
-            # Make sure the user cannot perform SNMP requests
-            with pytest.raises(Exception) as ve:
-                res = user_list_users(SNMP_USER_CONFIG)
-            assert "Unknown user name" in str(ve.value)
-
-    def test_zvol_reporting(self, create_nested_structure):
-        """
-        The TrueNAS snmp agent should list all zvols, regardless of
-        which ZFS pool or dataset they were created on. Equivalent to:
-        snmpwalk -v2c -cpublic localhost 1.3.6.1.4.1.50536.1.2.1.1.2
-        """
-        # The expectation is that the snmp agent should list exactly the six zvols.
-        created_items = create_nested_structure
-
-        # Include a snapshot of one of the zvols
-        with snapshot(created_items['zv'][0], "snmpsnap01"):
-            snmp_res = v2c_snmpwalk('1.3.6.1.4.1.50536.1.2.1.1.2')
-            assert all(v in created_items['zv'] for v in snmp_res), f"expected {created_items['zv']}, but found {snmp_res}"
diff --git a/tests/api2/test_475_syslog.py b/tests/api2/test_475_syslog.py
deleted file mode 100644
index 57b5079287cdb..0000000000000
--- a/tests/api2/test_475_syslog.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from time import sleep
-
-import pytest
-from auto_config import password, user
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-
-
-def do_syslog(ident, message, facility='syslog.LOG_USER', priority='syslog.LOG_INFO'):
-    """
-    This generates a syslog message on the TrueNAS server we're currently testing.
-    We don't need to override IP addr or creds because we are not a syslog target.
-    """
-    cmd = 'python3 -c "import syslog;'
-    cmd += f'syslog.openlog(ident=\\\"{ident}\\\", facility={facility});'
-    cmd += f'syslog.syslog({priority},\\\"{message}\\\");syslog.closelog()"'
-    ssh(cmd)
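For example, with the ident and message used later in this file, the command string handed to ssh() renders roughly as the single line below; the escaped quotes come straight from the f-strings above:

    python3 -c "import syslog;syslog.openlog(ident=\"iscsi-scstd\", facility=syslog.LOG_USER);syslog.syslog(syslog.LOG_INFO,\"ZZZZ: random scst test\");syslog.closelog()"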
-
-
-def check_syslog(log_path, message, target_ip=None, target_user=user, target_passwd=password, timeout=30):
-    """
-    Common function to check whether a particular message exists in a log file.
-    This will be used to check local and remote syslog servers.
-
-    The current implementation performs a simple grep through the log file, so
-    the onus is on the test developer not to under-specify `message`, in order
-    to avoid false positives.
-    """
-    target_ip = target_ip or truenas_server.ip
-    sleep_time = 1
-    while timeout > 0:
-        found = ssh(
-            f'grep -R "{message}" {log_path}',
-            check=False,
-            user=target_user,
-            password=target_passwd,
-            ip=target_ip
-        )
-        if not found:
-            sleep(sleep_time)
-            timeout -= sleep_time
-        else:
-            return found
-
-
-@pytest.mark.parametrize('params', [
-    {
-        'ident': 'iscsi-scstd',
-        'msg': 'ZZZZ: random scst test',
-        'path': '/var/log/scst.log',
-    },
-    {
-        'ident': 'iscsi-scstd',
-        'msg': 'ZZZZ: random scst test',
-        'path': '/var/log/scst.log',  # This is just to make sure our exclude filter works as intended
-    },
-])
-def test_local_syslog_filter(request, params):
-    """
-    This test validates that our syslog-ng filters are correctly placing
-    messages into their respective paths in /var/log
-    """
-    do_syslog(
-        params['ident'],
-        params['msg'],
-        params.get('facility', 'syslog.LOG_USER'),
-        params.get('priority', 'syslog.LOG_INFO')
-    )
-    assert check_syslog(params['path'], params['msg'], timeout=10)
-
-
-@pytest.mark.parametrize('log_path', [
-    '/var/log/messages',
-    '/var/log/syslog',
-    '/var/log/daemon.log'
-])
-def test_filter_leak(request, log_path):
-    """
-    This test validates that our exclude filter works properly and that
-    particularly spammy applications aren't polluting useful logs.
-    """
-    results = ssh(f'grep -R "ZZZZ:" {log_path}', complete_response=True, check=False)
-    assert results['result'] is False, str(results['result'])
-
-
-def test_07_check_can_set_remote_syslog(request):
-    """
-    Basic test to validate that setting a remote syslog target
-    doesn't break syslog-ng config
-    """
-    try:
-        data = call('system.advanced.update', {'syslogserver': '127.0.0.1'})
-        assert data['syslogserver'] == '127.0.0.1'
-        call('service.restart', 'syslogd', {'silent': False})
-    finally:
-        call('system.advanced.update', {'syslogserver': ''})
diff --git a/tests/api2/test_530_ups.py b/tests/api2/test_530_ups.py
deleted file mode 100644
index 765aa064dc0ac..0000000000000
--- a/tests/api2/test_530_ups.py
+++ /dev/null
@@ -1,237 +0,0 @@
-import os
-from tempfile import NamedTemporaryFile
-from time import sleep
-
-import pytest
-
-from assets.websocket.service import ensure_service_enabled, ensure_service_started
-from auto_config import password, user
-from functions import send_file
-
-from middlewared.test.integration.utils import call, mock, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-DUMMY_FAKEDATA_DEV = '/tmp/fakedata.dev'
-SHUTDOWN_MARKER_FILE = '/tmp/is_shutdown'
-
-first_ups_payload = {
-    'rmonitor': True,
-    'mode': 'MASTER',
-    'shutdown': 'BATT',
-    'port': '655',
-    'remotehost': '127.0.0.1',
-    'identifier': 'ups',
-    'driver': 'usbhid-ups$PROTECT NAS',
-    'monpwd': 'mypassword'
-}
-
-second_ups_payload = {
-    'rmonitor': False,
-    'mode': 'SLAVE',
-    'shutdown': 'LOWBATT',
-    'port': '65535',
-    'identifier': 'foo',
-    'monpwd': 'secondpassword'
-}
-
-default_dummy_data = {
-    'battery.charge': 100,
-    'driver.parameter.pollinterval': 2,
-    'input.frequency': 49.9,
-    'input.frequency.nominal': 50.0,
-    'input.voltage': 230,
-    'input.voltage.nominal': 240,
-    'ups.status': 'OL',
-    'ups.timer.shutdown': -1,
-    'ups.timer.start': -1,
-}
-
-
-def get_service_state():
-    return call('service.query', [['service', '=', 'ups']], {'get': True})
-
-
-def remove_file(filepath):
-    ssh(f'rm {filepath}', check=False)
-
-
-def did_shutdown():
-    return ssh(f'cat {SHUTDOWN_MARKER_FILE}', check=False) == "done\n"
-
-
-def write_fake_data(data=None):
-    data = data or {}
-    all_data = default_dummy_data | data
-    with NamedTemporaryFile() as f:
-        for k, v in all_data.items():
-            f.write(f'{k}: {v}\n'.encode('utf-8'))
-        f.flush()
-        os.fchmod(f.fileno(), 0o644)
-        results = send_file(f.name, DUMMY_FAKEDATA_DEV, user, password, truenas_server.ip)
-        assert results['result'], str(results['output'])
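With the default data, the file pushed to /tmp/fakedata.dev is a plain 'key: value' listing that the dummy-ups driver reads back, e.g. (abridged):

    battery.charge: 100
    driver.parameter.pollinterval: 2
    input.frequency: 49.9
    ups.status: OL
    ups.timer.start: -1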
-
-
-def wait_for_alert(klass, retries=10):
-    assert retries > 0
-    while retries:
-        alerts = call('alert.list')
-        for alert in alerts:
-            if alert['klass'] == klass:
-                return alert
-        sleep(1)
-        retries -= 1
-
-
-@pytest.fixture(scope='module')
-def ups_running():
-    with ensure_service_enabled('ups'):
-        with ensure_service_started('ups'):
-            yield
-
-
-@pytest.fixture(scope='module')
-def dummy_ups_driver_configured():
-    write_fake_data()
-    remove_file(SHUTDOWN_MARKER_FILE)
-    old_config = call('ups.config')
-    del old_config['complete_identifier']
-    del old_config['id']
-    payload = {
-        'mode': 'MASTER',
-        'driver': 'dummy-ups',
-        'port': DUMMY_FAKEDATA_DEV,
-        'description': 'dummy-ups in dummy-once mode',
-        'shutdowncmd': f'echo done > {SHUTDOWN_MARKER_FILE}'
-    }
-    with mock('ups.driver_choices', return_value={'dummy-ups': 'Driver for multi-purpose UPS emulation',
-                                                  'usbhid-ups$PROTECT NAS': 'AEG Power Solutions ups 3 PROTECT NAS (usbhid-ups)'}):
-        call('ups.update', payload)
-        try:
-            yield
-        finally:
-            call('ups.update', old_config)
-            remove_file(SHUTDOWN_MARKER_FILE)
-            remove_file(DUMMY_FAKEDATA_DEV)
-
-
-def test__enable_ups_service():
-    results = get_service_state()
-    assert results['state'] == 'STOPPED', results
-    assert results['enable'] is False, results
-    call('service.update', 'ups', {'enable': True})
-    results = get_service_state()
-    assert results['enable'] is True, results
-
-
-def test__set_ups_options():
-    results = call('ups.update', first_ups_payload)
-    for data in first_ups_payload.keys():
-        assert first_ups_payload[data] == results[data], results
-
-
-def test__start_ups_service():
-    call('service.start', 'ups')
-    results = get_service_state()
-    assert results['state'] == 'RUNNING', results
-
-
-def test__get_reports_configuration_as_saved():
-    results = call('ups.config')
-    for data in first_ups_payload.keys():
-        assert first_ups_payload[data] == results[data], results
-
-
-def test__change_ups_options_while_service_is_running():
-    payload = {
-        'port': '65545',
-        'identifier': 'boo'
-    }
-    results = call('ups.update', payload)
-    for data in ['port', 'identifier']:
-        assert payload[data] == results[data], results
-    results = call('ups.config')
-    for data in ['port', 'identifier']:
-        assert payload[data] == results[data], results
-
-
-def test__stop_ups_service():
-    results = get_service_state()
-    assert results['state'] == 'RUNNING', results
-    call('service.stop', 'ups')
-    results = get_service_state()
-    assert results['state'] == 'STOPPED', results
-
-
-def test__change_ups_options():
-    results = call('ups.update', second_ups_payload)
-    for data in second_ups_payload.keys():
-        assert second_ups_payload[data] == results[data], results
-    call('service.start', 'ups')
-    results = get_service_state()
-    assert results['state'] == 'RUNNING', results
-    results = call('ups.config')
-    for data in second_ups_payload.keys():
-        assert second_ups_payload[data] == results[data], results
-
-
-def test__get_ups_driver_choice():
-    results = call('ups.driver_choices')
-    assert isinstance(results, dict) is True, results
-
-
-def test__get_ups_port_choice():
-    results = call('ups.port_choices')
-    assert isinstance(results, list) is True, results
-    assert isinstance(results[0], str) is True, results
-
-
-def test__disable_and_stop_ups_service():
-    call('service.update', 'ups', {'enable': False})
-    results = get_service_state()
-    assert results['enable'] is False, results
-    call('service.stop', 'ups')
-    results = get_service_state()
-    assert results['state'] == 'STOPPED', results
-
-
-def test__ups_online_to_online_lowbattery(ups_running, dummy_ups_driver_configured):
-    results = get_service_state()
-    assert results['state'] == 'RUNNING', results
-    sleep(2)
-    assert 'UPSBatteryLow' not in [alert['klass'] for alert in call('alert.list')]
-    write_fake_data({'battery.charge': 20, 'ups.status': 'OL LB'})
-    alert = wait_for_alert('UPSBatteryLow')
-    assert alert
-    assert 'battery.charge: 20' in alert['formatted'], alert
-    assert not did_shutdown()
-
-
-def test__ups_online_to_onbatt(ups_running, dummy_ups_driver_configured):
-    assert 'UPSOnBattery' not in [alert['klass'] for alert in call('alert.list')]
-    write_fake_data({'battery.charge': 40, 'ups.status': 'OB'})
-    alert = wait_for_alert('UPSOnBattery')
-    assert alert
-    assert 'battery.charge: 40' in alert['formatted'], alert
-    assert not did_shutdown()
-
-
-def test__ups_onbatt_to_online(ups_running, dummy_ups_driver_configured):
-    assert 'UPSOnline' not in [alert['klass'] for alert in call('alert.list')]
-    write_fake_data({'battery.charge': 100, 'ups.status': 'OL'})
-    alert = wait_for_alert('UPSOnline')
-    assert alert
-    assert 'battery.charge: 100' in alert['formatted'], alert
-    assert not did_shutdown()
-
-
-def test__ups_online_to_onbatt_lowbattery(ups_running, dummy_ups_driver_configured):
-    assert 'UPSOnBattery' not in [alert['klass'] for alert in call('alert.list')]
-    write_fake_data({'battery.charge': 90, 'ups.status': 'OB'})
-    alert = wait_for_alert('UPSOnBattery')
-    assert alert
-    assert 'battery.charge: 90' in alert['formatted'], alert
-    write_fake_data({'battery.charge': 10, 'ups.status': 'OB LB'})
-    alert = wait_for_alert('UPSBatteryLow')
-    assert alert
-    assert 'battery.charge: 10' in alert['formatted'], alert
-    assert did_shutdown()
diff --git a/tests/api2/test_541_vm.py b/tests/api2/test_541_vm.py
deleted file mode 100644
index a51d253ffb83e..0000000000000
--- a/tests/api2/test_541_vm.py
+++ /dev/null
@@ -1,268 +0,0 @@
-import dataclasses
-import time
-
-import pytest
-from pytest_dependency import depends
-
-from auto_config import pool_name
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import dataset
-
-
-@dataclasses.dataclass
-class VmAssets:
-    # It is best to keep this module to creating a single VM:
-    # the functionality can be tested on one VM, and creating
-    # more than one nested VM incurs an ever-increasing performance
-    # penalty in the test infrastructure.
-    VM_NAMES = ['vm01']
-    VM_INFO = dict()
-    VM_DEVICES = dict()
-
-
-@pytest.mark.dependency(name='VIRT_SUPPORTED')
-def test_001_is_virtualization_supported():
-    if not call('vm.virtualization_details')['supported']:
-        pytest.skip('Virtualization not supported')
-    elif call('failover.licensed'):
-        pytest.skip('Virtualization not supported on HA')
-
-
-@pytest.mark.parametrize(
-    'info',
-    [
-        {'method': 'vm.flags', 'type': dict, 'keys': ('intel_vmx', 'amd_rvi')},
-        {'method': 'vm.cpu_model_choices', 'type': dict, 'keys': ('EPYC',)},
-        {'method': 'vm.bootloader_options', 'type': dict, 'keys': ('UEFI', 'UEFI_CSM')},
-        {'method': 'vm.get_available_memory', 'type': int},
-        {'method': 'vm.guest_architecture_and_machine_choices', 'type': dict, 'keys': ('i686', 'x86_64')},
-        {'method': 'vm.maximum_supported_vcpus', 'type': int},
-        {'method': 'vm.port_wizard', 'type': dict, 'keys': ('port', 'web')},
-        {'method': 'vm.random_mac', 'type': str},
-        {'method': 'vm.resolution_choices', 'type': dict, 'keys': ('1920x1200', '640x480')},
-        {'method': 'vm.device.bind_choices', 'type': dict, 'keys': ('0.0.0.0', '::')},
-        {'method': 'vm.device.iommu_enabled', 'type': bool},
-        {'method': 'vm.device.iotype_choices', 'type': dict, 'keys': ('NATIVE',)},
-        {'method': 'vm.device.nic_attach_choices', 'type': dict},
-        {'method': 'vm.device.usb_controller_choices', 'type': dict, 'keys': ('qemu-xhci',)},
-        {'method': 'vm.device.usb_passthrough_choices', 'type': dict},
-        {'method': 'vm.device.passthrough_device_choices', 'type': dict},
-        {'method': 'vm.device.pptdev_choices', 'type': dict}
-    ],
-    ids=lambda x: x['method']
-)
-def test_002_vm_endpoint(info, request):
-    """
-    Very basic checks of various VM endpoints. Ensures each
-    returns without error and that the response type is what
-    we expect. If a dict is returned, we also check that the
-    expected top-level keys exist.
-    """
-    depends(request, ['VIRT_SUPPORTED'])
-    rv = call(info['method'])
-    assert isinstance(rv, info['type'])
-    if (keys := info.get('keys')):
-        assert all((i in rv for i in keys))
-
-
-@pytest.mark.parametrize('disk_name', ['test zvol'])
-def test_003_verify_disk_choice(disk_name):
-    with dataset(disk_name, {'type': 'VOLUME', 'volsize': 1048576, 'sparse': True}) as ds:
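-        # /dev/zvol device links encode spaces in the dataset name as '+'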
-        assert call('vm.device.disk_choices').get(f'/dev/zvol/{ds.replace(" ", "+")}') == ds
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_CREATED')
-def test_010_create_vm(vm_name, request):
-    depends(request, ['VIRT_SUPPORTED'])
-    vm_payload = {
-        'name': vm_name,
-        'description': f'{vm_name} description',
-        'vcpus': 1,
-        'memory': 512,
-        'bootloader': 'UEFI',
-        'autostart': False,
-    }
-    vm = call('vm.create', vm_payload)
-    qry = call('vm.query', [['id', '=', vm['id']]], {'get': True})
-    assert all((vm_payload[key] == qry[key] for key in vm_payload))
-    VmAssets.VM_INFO.update({qry['name']: {'query_response': qry}})
-
-
-@pytest.mark.parametrize('device', ['DISK', 'DISPLAY', 'NIC'])
-@pytest.mark.dependency(name='ADD_DEVICES_TO_VM')
-def test_011_add_devices_to_vm(device, request):
-    depends(request, ['VM_CREATED'])
-    for vm_name, info in VmAssets.VM_INFO.items():
-        if vm_name not in VmAssets.VM_DEVICES:
-            VmAssets.VM_DEVICES[vm_name] = dict()
-
-        dev_info = {
-            'vm': info['query_response']['id'],
-            'attributes': {
-                'dtype': device,
-            }
-        }
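-        # fill in device-specific attributes for each dtype before creating the device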
-        if device == 'DISK':
-            zvol_name = f'{pool_name}/{device}_for_{vm_name}'
-            dev_info['attributes'].update({
-                'dtype': device,
-                'create_zvol': True,
-                'zvol_name': zvol_name,
-                'zvol_volsize': 1048576
-            })
-        elif device == 'DISPLAY':
-            dev_info['attributes'].update({'resolution': '1024x768', 'password': 'displaypw'})
-        elif device == 'NIC':
-            for nic_name in call('vm.device.nic_attach_choices'):
-                dev_info['attributes'].update({'nic_attach': nic_name, 'dtype': device})
-                break
-        else:
-            assert False, f'Unhandled device type: ({device!r})'
-
-        info = call('vm.device.create', dev_info)
-        VmAssets.VM_DEVICES[vm_name].update({device: info})
-        # only adding these devices to 1 VM
-        break
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-def test_012_verify_devices_for_vm(vm_name, request):
-    depends(request, ['ADD_DEVICES_TO_VM'])
-    for device, info in VmAssets.VM_DEVICES[vm_name].items():
-        qry = call('vm.device.query', [['id', '=', info['id']]], {'get': True})
-        assert qry['vm'] == VmAssets.VM_INFO[vm_name]['query_response']['id']
-        assert qry['attributes'] == VmAssets.VM_DEVICES[vm_name][device]['attributes']
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-def test_013_delete_vm_devices(vm_name, request):
-    depends(request, ['ADD_DEVICES_TO_VM'])
-    for device, info in VmAssets.VM_DEVICES[vm_name].items():
-        opts = {}
-        if device == 'DISK':
-            opts = {'zvol': True}
-
-        call('vm.device.delete', info['id'], opts)
-        assert not call('vm.device.query', [['id', '=', info['id']]])
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_STARTED')
-def test_014_start_vm(vm_name, request):
-    depends(request, ['VM_CREATED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    call('vm.start', _id)
-    vm_status = call('vm.status', _id)
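-    # both the middleware state and the underlying libvirt domain state should report RUNNING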
-    assert all((vm_status[key] == 'RUNNING' for key in ('state', 'domain_state')))
-    assert all((vm_status['pid'], isinstance(vm_status['pid'], int)))
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-def test_015_query_vm_info(vm_name, request):
-    depends(request, ['VIRT_SUPPORTED', 'VM_CREATED', 'VM_STARTED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    vm_string = f'{_id}_{vm_name}'
-    assert call('vm.get_console', _id) == vm_string
-    assert vm_string in call('vm.log_file_path', _id)
-
-    mem_keys = ('RNP', 'PRD', 'RPRD')
-    mem_info = call('vm.get_vmemory_in_use')
-    assert isinstance(mem_info, dict)
-    assert all((key in mem_info for key in mem_keys))
-    assert all((isinstance(mem_info[key], int) for key in mem_keys))
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_SUSPENDED')
-def test_020_suspend_vm(vm_name, request):
-    depends(request, ['VIRT_SUPPORTED', 'VM_CREATED', 'VM_STARTED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    call('vm.suspend', _id)
-    for retry in range(1, 4):
-        status = call('vm.status', _id)
-        if all((status['state'] == 'SUSPENDED', status['domain_state'] == 'PAUSED')):
-            break
-        else:
-            time.sleep(1)
-    else:
-        assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to suspend'
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_RESUMED')
-def test_021_resume_vm(vm_name, request):
-    depends(request, ['VM_SUSPENDED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    call('vm.resume', _id)
-    for retry in range(1, 4):
-        status = call('vm.status', _id)
-        if all((status['state'] == 'RUNNING', status['domain_state'] == 'RUNNING')):
-            break
-        else:
-            time.sleep(1)
-    else:
-        assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to resume'
-
-
-@pytest.mark.skip(reason='Takes > 60 seconds and is flaky')
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_RESTARTED')
-def test_022_restart_vm(vm_name, request):
-    depends(request, ['VM_RESUMED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    call('vm.restart', _id, job=True)
-    status = call('vm.status', _id)
-    assert all((status['state'] == 'RUNNING', status['domain_state'] == 'RUNNING'))
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_POWERED_OFF')
-def test_023_poweroff_vm(vm_name, request):
-    depends(request, ['VM_RESUMED'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    call('vm.poweroff', _id)
-    for retry in range(1, 4):
-        status = call('vm.status', _id)
-        if all((status['state'] == 'STOPPED', status['domain_state'] == 'SHUTOFF')):
-            break
-        else:
-            time.sleep(1)
-    else:
-        assert False, f'Timed out after {retry} seconds waiting on {vm_name!r} to poweroff'
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-@pytest.mark.dependency(name='VM_UPDATED')
-def test_024_update_powered_off_vm(vm_name, request):
-    depends(request, ['VM_POWERED_OFF'])
-    _id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    new_mem = 768
-    call('vm.update', _id, {'memory': new_mem})
-    assert call('vm.query', [['id', '=', _id]], {'get': True})['memory'] == new_mem
-
-
-@pytest.mark.parametrize('vm_name', VmAssets.VM_NAMES)
-def test_024_clone_powered_off_vm(vm_name, request):
-    depends(request, ['VM_POWERED_OFF'])
-    to_clone_id = VmAssets.VM_INFO[vm_name]['query_response']['id']
-    new_name = f'{vm_name}_clone'
-    call('vm.clone', to_clone_id, new_name)
-    qry = call('vm.query', [['name', '=', new_name]], {'get': True})
-    VmAssets.VM_INFO.update({new_name: {'query_response': qry}})
-    assert call('vm.get_console', qry['id']) == f'{qry["id"]}_{new_name}'
-
-    VmAssets.VM_DEVICES.update({new_name: dict()})
-    for dev in call('vm.device.query', [['vm', '=', qry['id']]]):
-        if dev['attributes']['dtype'] in ('DISK', 'NIC', 'DISPLAY'):
-            # add this to VM_DEVICES so we properly clean-up after
-            # the test module runs
-            VmAssets.VM_DEVICES[new_name].update({dev['attributes']['dtype']: dev})
-
-
-def test_025_cleanup_vms(request):
-    depends(request, ['VM_POWERED_OFF'])
-    for vm in call('vm.query'):
-        call('vm.delete', vm['id'])
-        assert not call('vm.query', [['id', '=', vm['id']]])
diff --git a/tests/api2/test_900_docs.py b/tests/api2/test_900_docs.py
deleted file mode 100644
index a3a128f4520b6..0000000000000
--- a/tests/api2/test_900_docs.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python3
-
-# License: BSD
-
-import pytest
-import sys
-import os
-from pytest_dependency import depends
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import SSH_TEST
-from auto_config import user, password
-
-
-def test_core_get_methods(request):
-    results = SSH_TEST("midclt call core.get_methods", user, password)
-    assert results['result'] is True, results
diff --git a/tests/api2/test_account.py b/tests/api2/test_account.py
deleted file mode 100644
index 4835b75334a77..0000000000000
--- a/tests/api2/test_account.py
+++ /dev/null
@@ -1,157 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.utils import call, client
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
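-# directory services users and groups are surfaced with synthetic datastore ids at or above this base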
-BASE_SYNTHETIC_DATASTORE_ID = 100000000
-DS_USR_VERR_STR = "Directory services users may not be added as members of local groups."
-DS_GRP_VERR_STR = "Local users may not be members of directory services groups."
-
-
-def test_create_account_audit():
-    user_id = None
-    try:
-        with expect_audit_method_calls([{
-            "method": "user.create",
-            "params": [
-                {
-                    "username": "sergey",
-                    "full_name": "Sergey",
-                    "group_create": True,
-                    "home": "/nonexistent",
-                    "password": "********",
-                }
-            ],
-            "description": "Create user sergey",
-        }]):
-            payload = {
-                "username": "sergey",
-                "full_name": "Sergey",
-                "group_create": True,
-                "home": "/nonexistent",
-                "password": "password",
-            }
-            user_id = call("user.create", payload)
-    finally:
-        if user_id is not None:
-            call("user.delete", user_id)
-
-
-def test_update_account_audit():
-    with user({
-        "username": "user2",
-        "full_name": "user2",
-        "group_create": True,
-        "password": "test1234",
-    }) as u:
-        with expect_audit_method_calls([{
-            "method": "user.update",
-            "params": [u["id"], {}],
-            "description": "Update user user2",
-        }]):
-            call("user.update", u["id"], {})
-
-
-def test_delete_account_audit():
-    with user({
-        "username": "user2",
-        "full_name": "user2",
-        "group_create": True,
-        "password": "test1234",
-    }) as u:
-        with expect_audit_method_calls([{
-            "method": "user.delete",
-            "params": [u["id"], {}],
-            "description": "Delete user user2",
-        }]):
-            call("user.delete", u["id"], {})
-
-
-def test_create_group_audit():
-    group_id = None
-    try:
-        with expect_audit_method_calls([{
-            "method": "group.create",
-            "params": [
-                {
-                    "name": "group2",
-                }
-            ],
-            "description": "Create group group2",
-        }]):
-            payload = {
-                "name": "group2",
-            }
-            group_id = call("group.create", payload)
-    finally:
-        if group_id is not None:
-            call("group.delete", group_id)
-
-
-def test_update_group_audit():
-    with group({
-        "name": "group2",
-    }) as g:
-        with expect_audit_method_calls([{
-            "method": "group.update",
-            "params": [g["id"], {}],
-            "description": "Update group group2",
-        }]):
-            call("group.update", g["id"], {})
-
-
-def test_delete_group_audit():
-    with group({
-        "name": "group2",
-    }) as g:
-        with expect_audit_method_calls([{
-            "method": "group.delete",
-            "params": [g["id"]],
-            "description": "Delete group group2",
-        }]):
-            call("group.delete", g["id"])
-
-
-def test_delete_group_audit_delete_users():
-    with group({
-        "name": "group2",
-    }) as g:
-        with expect_audit_method_calls([{
-            "method": "group.delete",
-            "params": [g["id"], {"delete_users": True}],
-            "description": "Delete group group2 and all users that have this group as their primary group",
-        }]):
-            call("group.delete", g["id"], {"delete_users": True})
-
-
-def test_update_account_using_token():
-    token = call("auth.generate_token", 300)
-
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", token)
-
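-        # the token carries the privileges of the (root) session that generated it, so the update succeeds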
-        c.call("user.update", 1, {})
-
-
-def test_create_local_group_ds_user():
-    with pytest.raises(ValidationErrors) as ve:
-        with group({"name": "local_ds", "users": [BASE_SYNTHETIC_DATASTORE_ID + 1]}):
-            pass
-
-    assert DS_USR_VERR_STR in str(ve)
-
-
-def test_create_local_user_ds_group():
-    with pytest.raises(ValidationErrors) as ve:
-        with user({
-            "username": "local_ds",
-            "groups": [BASE_SYNTHETIC_DATASTORE_ID + 1],
-            "full_name": "user_ds",
-            "group_create": True,
-            "password": "test1234",
-        }):
-            pass
-
-    assert DS_GRP_VERR_STR in str(ve)
diff --git a/tests/api2/test_account_duplicate_uid_gid.py b/tests/api2/test_account_duplicate_uid_gid.py
deleted file mode 100644
index 40d44563b164f..0000000000000
--- a/tests/api2/test_account_duplicate_uid_gid.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors, ValidationError
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def uid_1234():
-    with dataset(f"user1_homedir") as user1_homedir:
-        with user({
-            "username": "user1",
-            "full_name": "user1",
-            "group_create": True,
-            "groups": [],
-            "home": f"/mnt/{user1_homedir}",
-            "password": "test1234",
-            "uid": 1234,
-        }) as uid_1234:
-            yield uid_1234
-
-
-@pytest.fixture(scope="module")
-def gid_1234():
-    with group({
-        "name": "group1",
-        "gid": 1234,
-    }) as gid_1234:
-        yield gid_1234
-
-
-def test_create_duplicate_uid(uid_1234):
-    with dataset(f"user2_homedir") as user2_homedir:
-        with pytest.raises(ValidationErrors) as ve:
-            with user({
-                "username": "user2",
-                "full_name": "user2",
-                "group_create": True,
-                "groups": [],
-                "home": f"/mnt/{user2_homedir}",
-                "password": "test1234",
-                "uid": 1234,
-            }):
-                pass
-
-        assert ve.value.errors == [
-            ValidationError('user_create.uid', 'Uid 1234 is already used (user user1 has it)', errno.EEXIST),
-        ]
-
-
-def test_create_duplicate_gid(gid_1234):
-    with pytest.raises(ValidationErrors) as ve:
-        with group({
-            "name": "group2",
-            "gid": 1234,
-        }):
-            pass
-
-    assert ve.value.errors == [
-        ValidationError('group_create.gid', 'Gid 1234 is already used (group group1 has it)', errno.EEXIST),
-    ]
diff --git a/tests/api2/test_account_group.py b/tests/api2/test_account_group.py
deleted file mode 100644
index 55f40a8af4073..0000000000000
--- a/tests/api2/test_account_group.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import pytest
-
-from middlewared.service_exception import InstanceNotFound
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.utils import call
-
-
-def test_delete_group_delete_users():
-    with group({
-        "name": "group1",
-    }) as g:
-        with user({
-            "username": "user1",
-            "full_name": "user1",
-            "group": g["id"],
-            "password": "test1234",
-        }) as u1:
-            with user({
-                "username": "user2",
-                "full_name": "user2",
-                "group": g["id"],
-                "password": "test1234",
-            }) as u2:
-                with user({
-                    "username": "user3",
-                    "full_name": "user3",
-                    "group_create": True,
-                    "groups": [g["id"]],
-                    "password": "test1234",
-                }) as u3:
-                    call("group.delete", g["id"], {"delete_users": True})
-
-                    with pytest.raises(InstanceNotFound):
-                        call("user.get_instance", u1["id"])
-                    with pytest.raises(InstanceNotFound):
-                        call("user.get_instance", u2["id"])
-                    call("user.get_instance", u3["id"])
diff --git a/tests/api2/test_account_idmap.py b/tests/api2/test_account_idmap.py
deleted file mode 100644
index 746955600ea3c..0000000000000
--- a/tests/api2/test_account_idmap.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import os
-import sys
-
-import pytest
-
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.utils import call, client
-
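-# S-1-22-1 and S-1-22-2 are Samba's well-known SID namespaces for local Unix users and groups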
-LOCAL_USER_SID_PREFIX = 'S-1-22-1-'
-LOCAL_GROUP_SID_PREFIX = 'S-1-22-2-'
-
-
-def test_uid_idmapping():
-    with user({
-        'username': 'idmap_user',
-        'full_name': 'idmap_user',
-        'smb': True,
-        'group_create': True,
-        'password': 'test1234',
-    }) as u:
-        UNIX_SID = LOCAL_USER_SID_PREFIX + str(u['uid'])
-        results = call('idmap.convert_sids', [UNIX_SID])
-        assert results['unmapped'] == {}
-        assert UNIX_SID in results['mapped']
-
-        entry = results['mapped'][UNIX_SID]
-
-        assert entry['id_type'] == 'USER'
-        assert entry['id'] == u['uid']
-        assert entry['name'] == 'Unix User\\idmap_user'
-
-        results = call('idmap.convert_unixids', [{
-            'id_type': 'USER',
-            'id': u['uid'],
-        }])
-
-        assert results['unmapped'] == {}
-        entry = results['mapped'][f'UID:{u["uid"]}']
-        assert entry['id_type'] == 'USER'
-        assert entry['id'] == u['uid']
-        pdb_sid = entry['sid']
-
-        user_obj = call('user.get_user_obj', {'uid': u['uid'], 'sid_info': True})
-        assert 'sid' in user_obj
-        assert user_obj['sid'] == pdb_sid
diff --git a/tests/api2/test_account_privilege.py b/tests/api2/test_account_privilege.py
deleted file mode 100644
index c172488ad0bb7..0000000000000
--- a/tests/api2/test_account_privilege.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import errno
-import os
-import sys
-import types
-
-import pytest
-
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.account import group, privilege, root_with_password_disabled
-from middlewared.test.integration.utils import call, mock
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-sys.path.append(os.getcwd())
-from functions import DELETE, POST, PUT
-
-
-def test_change_local_administrator_groups_to_invalid():
-    operator = call("group.query", [["group", "=", "operator"]], {"get": True})
-
-    with pytest.raises(ValidationErrors) as ve:
-        call("privilege.update", 1, {"local_groups": [operator["id"]]})
-
-    assert ve.value.errors[0].attribute == "privilege_update.local_groups"
-
-
-def test_change_local_administrator_allowlist():
-    with pytest.raises(ValidationErrors) as ve:
-        call("privilege.update", 1, {"allowlist": [{"method": "CALL", "resource": "system.info"}]})
-
-    assert ve.value.errors[0].attribute == "privilege_update.allowlist"
-
-
-def test_change_local_administrator_roles():
-    with pytest.raises(ValidationErrors) as ve:
-        call("privilege.update", 1, {"roles": ['READONLY_ADMIN']})
-
-    assert ve.value.errors[0].attribute == "privilege_update.roles"
-
-
-def test_delete_local_administrator():
-    with pytest.raises(CallError) as ve:
-        call("privilege.delete", 1)
-
-    assert ve.value.errno == errno.EPERM
-
-
-def test_invalid_local_group():
-    with pytest.raises(ValidationErrors) as ve:
-        call("privilege.create", {
-            "name": "Test",
-            "local_groups": [1024],  # invalid local group ID
-            "ds_groups": [],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        })
-
-    assert ve.value.errors[0].attribute == "privilege_create.local_groups.0"
-
-
-def test_delete_local_administrator_group():
-    with group({
-        "name": "test_local_admins",
-    }) as g:
-        local_groups = [lg["gid"] for lg in call("privilege.get_instance", 1)["local_groups"]]
-        call("privilege.update", 1, {"local_groups": local_groups + [g["gid"]]})
-
-        with pytest.raises(CallError) as ve:
-            call("group.delete", g["id"])
-
-        assert ve.value.errmsg.startswith("This group is used by privilege")
-
-        call("privilege.update", 1, {"local_groups": local_groups})
-
-
-@pytest.fixture(scope="module")
-def privilege_with_orphan_local_group():
-    with group({
-        "name": "test_orphan",
-        "smb": False,
-    }) as g:
-        gid = g["gid"]
-        privilege = call("privilege.create", {
-            "name": "Test orphan",
-            "local_groups": [gid],
-            "ds_groups": [],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        })
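-        # delete the group row directly from the datastore, bypassing validation, so the privilege now references an orphaned GID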
-        call("datastore.delete", "account.bsdgroups", g["id"])
-        call("etc.generate", "user")
-        call("idmap.gencache.flush")
-
-    yield types.SimpleNamespace(gid=gid, privilege=privilege)
-
-    call("privilege.delete", privilege["id"])
-
-
-def test_create_group_with_orphan_privilege_gid(privilege_with_orphan_local_group):
-    with pytest.raises(ValidationErrors) as ve:
-        with group({
-            "name": "test_orphan_duplicate",
-            "gid": privilege_with_orphan_local_group.gid,
-        }):
-            pass
-
-    assert ve.value.errors[0].attribute == "group_create.gid"
-    assert ve.value.errors[0].errmsg.startswith("A privilege 'Test orphan' already uses this group ID.")
-
-
-def test_group_next_gid():
-    next_gid = call("group.get_next_gid")
-    with mock("privilege.used_local_gids", f"""
-        async def mock(self):
-            result = await self.used_local_gids()
-            result[{next_gid}] = None
-            return result
-    """):
-        assert call("group.get_next_gid") == next_gid + 1
-
-
-def test_remove_only_local_administrator_password_enabled_user():
-    root = call("user.query", [["username", "=", "root"]], {"get": True})
-    with pytest.raises(ValidationErrors) as ve:
-        call("user.update", root["id"], {"password_disabled": True})
-
-    assert ve.value.errors[0].attribute == "user_update.password_disabled"
-    assert ve.value.errors[0].errmsg == (
-        "After disabling password for this user no password-enabled local user will have built-in privilege "
-        "'Local Administrator'."
-    )
-
-
-def test_password_disabled_root_is_a_local_administrator():
-    with root_with_password_disabled():
-        local_administrators = call("privilege.local_administrators")
-
-        assert len(local_administrators) == 1
-        assert local_administrators[0]["username"] == "root"
-
-
-@pytest.mark.parametrize("api", ["ws", "rest"])
-def test_create_privilege_audit(api):
-    privilege = None
-    try:
-        with expect_audit_method_calls([{
-            "method": "privilege.create",
-            "params": [
-                {
-                    "name": "Test",
-                    "web_shell": False,
-                }
-            ],
-            "description": "Create privilege Test",
-        }]):
-            payload = {
-                "name": "Test",
-                "web_shell": False,
-            }
-            if api == "ws":
-                privilege = call("privilege.create", payload)
-            elif api == "rest":
-                result = POST("/privilege/", payload)
-                assert result.status_code == 200, result.text
-                privilege = result.json()
-            else:
-                raise ValueError(api)
-    finally:
-        if privilege is not None:
-            call("privilege.delete", privilege["id"])
-
-
-@pytest.mark.parametrize("api", ["ws", "rest"])
-def test_update_privilege_audit(api):
-    with privilege({
-        "name": "Test",
-        "web_shell": False,
-    }) as p:
-        with expect_audit_method_calls([{
-            "method": "privilege.update",
-            "params": [p["id"], {}],
-            "description": "Update privilege Test",
-        }]):
-            if api == "ws":
-                call("privilege.update", p["id"], {})
-            elif api == "rest":
-                result = PUT(f"/privilege/id/{p['id']}", {})
-                assert result.status_code == 200, result.text
-            else:
-                raise ValueError(api)
-
-
-@pytest.mark.parametrize("api", ["ws", "rest"])
-def test_delete_privilege_audit(api):
-    with privilege({
-        "name": "Test",
-        "web_shell": False,
-    }) as p:
-        with expect_audit_method_calls([{
-            "method": "privilege.delete",
-            "params": [p["id"]],
-            "description": "Delete privilege Test",
-        }]):
-            if api == "ws":
-                call("privilege.delete", p["id"])
-            elif api == "rest":
-                result = DELETE(f"/privilege/id/{p['id']}")
-                assert result.status_code == 200, result.text
-            else:
-                raise ValueError(api)
diff --git a/tests/api2/test_account_privilege_authentication.py b/tests/api2/test_account_privilege_authentication.py
deleted file mode 100644
index 70516a257aa58..0000000000000
--- a/tests/api2/test_account_privilege_authentication.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import errno
-import json
-import logging
-
-import pytest
-import websocket
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import user, unprivileged_user as unprivileged_user_template
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, client, ssh, websocket_url
-from middlewared.test.integration.utils.shell import assert_shell_works
-
-logger = logging.getLogger(__name__)
-
-
-@pytest.fixture(scope="module")
-def unprivileged_user():
-    with unprivileged_user_template(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=[{"method": "CALL", "resource": "system.info"}],
-        web_shell=False,
-    ) as t:
-        yield t
-
-
-@pytest.fixture()
-def unprivileged_user_token(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        return c.call("auth.generate_token", 300, {}, True)
-
-
-@pytest.fixture(scope="module")
-def unprivileged_user_with_web_shell():
-    with unprivileged_user_template(
-        username="unprivilegedws",
-        group_name="unprivileged_users_ws",
-        privilege_name="Unprivileged users with web shell",
-        allowlist=[],
-        web_shell=True,
-    ) as t:
-        yield t
-
-
-@pytest.fixture()
-def unprivileged_user_with_web_shell_token(unprivileged_user_with_web_shell):
-    with client(auth=(unprivileged_user_with_web_shell.username, unprivileged_user_with_web_shell.password)) as c:
-        return c.call("auth.generate_token", 300, {}, True)
-
-
-def test_libpam_auth(unprivileged_user):
-    pam_resp = call('auth.libpam_authenticate', unprivileged_user.username, unprivileged_user.password)
-    assert pam_resp['code'] == 0
-    assert pam_resp['reason'] == 'Success'
-
-    pam_resp = call('auth.libpam_authenticate', unprivileged_user.username, 'CANARY')
-    assert pam_resp['code'] == 7
-    assert pam_resp['reason'] == 'Authentication failure'
-
-
-def test_websocket_auth_session_list_terminate(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        sessions = call("auth.sessions")
-        my_sessions = [
-            s for s in sessions
-            if s["credentials"] == "LOGIN_PASSWORD" and s["credentials_data"]["username"] == unprivileged_user.username
-        ]
-        assert len(my_sessions) == 1, sessions
-
-        call("auth.terminate_session", my_sessions[0]["id"])
-
-        with pytest.raises(Exception):
-            c.call("system.info")
-
-    sessions = call("auth.sessions")
-    assert not [
-        s for s in sessions
-        if s["credentials"] == "LOGIN_PASSWORD" and s["credentials_data"]["username"] == unprivileged_user.username
-    ], sessions
-
-
-def test_websocket_auth_terminate_all_other_sessions(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        call("auth.terminate_other_sessions")
-
-        with pytest.raises(Exception):
-            c.call("system.info")
-
-
-def test_websocket_auth_get_methods(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        methods = c.call("core.get_methods")
-
-    assert "system.info" in methods
-    assert "pool.create" not in methods
-
-
-def test_websocket_auth_calls_allowed_method(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        c.call("system.info")
-
-
-def test_websocket_auth_fails_to_call_forbidden_method(unprivileged_user):
-    with client(auth=(unprivileged_user.username, unprivileged_user.password)) as c:
-        with pytest.raises(CallError) as ve:
-            c.call("pool.create")
-
-        assert ve.value.errno == errno.EACCES
-
-
-def test_unix_socket_auth_get_methods(unprivileged_user):
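-    # calls over the local unix socket authenticate as the invoking UID, so the user's allowlist still applies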
-    methods = json.loads(ssh(f"sudo -u {unprivileged_user.username} midclt call core.get_methods"))
-
-    assert "system.info" in methods
-    assert "pool.create" not in methods
-
-
-def test_unix_socket_auth_calls_allowed_method(unprivileged_user):
-    ssh(f"sudo -u {unprivileged_user.username} midclt call system.info")
-
-
-def test_unix_socket_auth_fails_to_call_forbidden_method(unprivileged_user):
-    result = ssh(f"sudo -u {unprivileged_user.username} midclt call pool.create", check=False, complete_response=True)
-    assert "Not authorized" in result["stderr"]
-
-
-def test_unix_socket_auth_fails_when_user_has_no_privilege():
-    with dataset(f"noconnect_homedir") as homedir:
-        with user({
-            "username": "noconnect",
-            "full_name": "Noconnect",
-            "group_create": True,
-            "groups": [],
-            "home": f"/mnt/{homedir}",
-            "password": "test1234",
-        }):
-            result = ssh(f"sudo -u noconnect midclt call pool.create", check=False, complete_response=True)
-            assert "Not authenticated" in result["stderr"]
-
-
-def test_token_auth_session_list_terminate(unprivileged_user, unprivileged_user_token):
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", unprivileged_user_token)
-
-        sessions = call("auth.sessions")
-        my_sessions = [
-            s for s in sessions
-            if (
-                s["credentials"] == "TOKEN" and
-                s["credentials_data"]["parent"]["credentials"] == "LOGIN_PASSWORD" and
-                s["credentials_data"]["parent"]["credentials_data"]["username"] == unprivileged_user.username
-            )
-        ]
-        assert len(my_sessions) == 1, sessions
-
-        call("auth.terminate_session", my_sessions[0]["id"])
-
-        with pytest.raises(Exception):
-            c.call("system.info")
-
-
-def test_token_auth_calls_allowed_method(unprivileged_user_token):
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", unprivileged_user_token)
-
-        c.call("system.info")
-
-
-def test_token_auth_fails_to_call_forbidden_method(unprivileged_user_token):
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", unprivileged_user_token)
-
-        with pytest.raises(CallError) as ve:
-            c.call("pool.create")
-
-        assert ve.value.errno == errno.EACCES
-
-
-def test_drop_privileges(unprivileged_user_token):
-    with client() as c:
-        # This should drop privileges for the current root session
-        assert c.call("auth.login_with_token", unprivileged_user_token)
-
-        with pytest.raises(CallError) as ve:
-            c.call("pool.create")
-
-        assert ve.value.errno == errno.EACCES
-
-
-def test_token_auth_working_not_working_web_shell(unprivileged_user_token):
-    ws = websocket.create_connection(websocket_url() + "/websocket/shell")
-    try:
-        ws.send(json.dumps({"token": unprivileged_user_token}))
-        resp_opcode, msg = ws.recv_data()
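-        # login must fail because this user's privilege was created with web_shell=False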
-        assert json.loads(msg.decode())["msg"] == "failed"
-    finally:
-        ws.close()
-
-
-@pytest.mark.timeout(30)
-def test_token_auth_working_web_shell(unprivileged_user_with_web_shell_token):
-    assert_shell_works(unprivileged_user_with_web_shell_token, "unprivilegedws")
diff --git a/tests/api2/test_account_privilege_role.py b/tests/api2/test_account_privilege_role.py
deleted file mode 100644
index 88d572b74ce29..0000000000000
--- a/tests/api2/test_account_privilege_role.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import errno
-import logging
-from time import sleep
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.pool import dataset, snapshot
-from middlewared.test.integration.utils import client
-
-logger = logging.getLogger(__name__)
-
-
-@pytest.mark.parametrize("role", ["SNAPSHOT_READ", "SNAPSHOT_WRITE"])
-def test_can_read_with_read_or_write_role(role):
-    with dataset("test_snapshot_read") as ds:
-        with snapshot(ds, "test"):
-            with unprivileged_user_client([role]) as c:
-                assert len(c.call("zfs.snapshot.query", [["dataset", "=", ds]])) == 1
-
-
-def test_can_not_write_with_read_role():
-    with dataset("test_snapshot_write1") as ds:
-        with unprivileged_user_client(["SNAPSHOT_READ"]) as c:
-            with pytest.raises(CallError) as ve:
-                c.call("zfs.snapshot.create", {
-                    "dataset": ds,
-                    "name": "test",
-                })
-
-            assert ve.value.errno == errno.EACCES
-
-
-def test_write_with_write_role():
-    with dataset("test_snapshot_write2") as ds:
-        with unprivileged_user_client(["SNAPSHOT_WRITE"]) as c:
-            c.call("zfs.snapshot.create", {
-                "dataset": ds,
-                "name": "test",
-            })
-
-
-def test_can_delete_with_write_role_with_separate_delete():
-    with dataset("test_snapshot_delete1") as ds:
-        with snapshot(ds, "test") as id:
-            with unprivileged_user_client(["SNAPSHOT_DELETE"]) as c:
-                c.call("zfs.snapshot.delete", id)
-
-
-def test_can_not_delete_with_write_role_with_separate_delete():
-    with dataset("test_snapshot_delete2") as ds:
-        with snapshot(ds, "test") as id:
-            with unprivileged_user_client(["SNAPSHOT_WRITE"]) as c:
-                with pytest.raises(CallError) as ve:
-                    c.call("zfs.snapshot.delete", id)
-
-                assert ve.value.errno == errno.EACCES
-
-
-def test_works_for_redefined_crud_method():
-    with unprivileged_user_client(["SHARING_ADMIN"]) as c:
-        c.call("service.update", "cifs", {"enable": False})
-
-
-def test_full_admin_role():
-    with unprivileged_user_client(["FULL_ADMIN"]) as c:
-        c.call("system.general.config")
-
-        # User with FULL_ADMIN role should have something in jobs list
-        assert len(c.call("core.get_jobs")) != 0
-
-        # attempt to wait / cancel job should not fail
-        jid = c.call("core.job_test", {"sleep": 1})
-
-        c.call("core.job_wait", jid, job=True)
-
-        c.call("core.job_abort", jid)
-
-
-@pytest.mark.parametrize("role,method,params", [
-    ("DATASET_READ", "pool.dataset.checksum_choices", []),
-])
-def test_read_role_can_call_method(role, method, params):
-    with unprivileged_user_client([role]) as c:
-        c.call(method, *params)
-
-
-@pytest.mark.parametrize("method,params", [
-    ("system.general.config", []),
-    ("user.get_instance", [1]),
-    ("user.query", []),
-    ("user.shell_choices", []),
-    ("auth.me", []),
-    ("filesystem.listdir", ["/"]),
-    ("filesystem.stat", ["/"]),
-    ("filesystem.getacl", ["/"]),
-    ("filesystem.acltemplate.by_path", [{"path": "/"}]),
-    ("pool.dataset.details", []),
-    ("core.get_jobs", []),
-])
-def test_readonly_can_call_method(method, params):
-    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
-        c.call(method, *params)
-
-
-def test_readonly_can_not_call_method():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
-        with pytest.raises(CallError) as ve:
-            c.call("user.create")
-
-        assert ve.value.errno == errno.EACCES
-
-        with pytest.raises(CallError) as ve:
-            # fails with EPERM if API access granted
-            c.call("filesystem.mkdir", "/foo")
-
-        assert ve.value.errno == errno.EACCES
-
-
-def test_limited_user_can_set_own_attributes():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
-        c.call("auth.set_attribute", "foo", "bar")
-        attrs = c.call("auth.me")["attributes"]
-        assert "foo" in attrs
-        assert attrs["foo"] == "bar"
-
-
-def test_limited_user_auth_token_behavior():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
-        auth_token = c.call("auth.generate_token")
-
-        with client(auth=None) as c2:
-            assert c2.call("auth.login_with_token", auth_token)
-            c2.call("auth.me")
-            c2.call("core.get_jobs")
-
-
-def test_sharing_manager_jobs():
-    with unprivileged_user_client(["SHARING_ADMIN"]) as c:
-        auth_token = c.call("auth.generate_token")
-        jid = c.call("core.job_test", {"sleep": 1})
-
-        with client(auth=None) as c2:
-            #c.call("core.job_wait", jid, job=True)
-            assert c2.call("auth.login_with_token", auth_token)
-            wait_job_id = c2.call("core.job_wait", jid)
-            sleep(2)
-            result = c2.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True})
-            assert result["state"] == "SUCCESS"
-            c2.call("core.job_abort", wait_job_id)
-
-
-def test_foreign_job_access():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as unprivileged:
-        with client() as c:
-            job = c.call("core.job_test")
-
-            wait_job_id = unprivileged.call("core.job_wait", job)
-            sleep(2)
-            result = unprivileged.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True})
-            assert result["state"] != "SUCCESS"
-
-            jobs = unprivileged.call("core.get_jobs", [["id", "=", job]])
-            assert jobs == []
-
-    with unprivileged_user_client(["FULL_ADMIN"]) as unprivileged:
-        with client() as c:
-            job = c.call("core.job_test")
-
-            wait_job_id = unprivileged.call("core.job_wait", job)
-            sleep(2)
-            result = unprivileged.call("core.get_jobs", [["id", "=", wait_job_id]], {"get": True})
-            assert result["state"] == "SUCCESS"
-
-
-def test_can_not_subscribe_to_event():
-    with unprivileged_user_client() as unprivileged:
-        with pytest.raises(CallError) as ve:
-            unprivileged.subscribe("alert.list", lambda *args, **kwargs: None)
-
-        assert ve.value.errno == errno.EACCES
-
-
-def test_can_subscribe_to_event():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as unprivileged:
-        unprivileged.subscribe("alert.list", lambda *args, **kwargs: None)
diff --git a/tests/api2/test_account_privilege_role_forbidden_fields.py b/tests/api2/test_account_privilege_role_forbidden_fields.py
deleted file mode 100644
index 1bd34eb517946..0000000000000
--- a/tests/api2/test_account_privilege_role_forbidden_fields.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.cloud_sync import local_ftp_credential
-from middlewared.test.integration.assets.pool import dataset
-
-
-@pytest.fixture(scope="module")
-def unprivileged_client():
-    with unprivileged_user_client(["CLOUD_BACKUP_WRITE", "CLOUD_SYNC_WRITE"]) as c:
-        yield c
-
-
-@pytest.fixture(scope="module")
-def cloudsync_template():
-    with local_ftp_credential() as credential:
-        with dataset("cloud_backup") as local_dataset:
-            yield {
-                "path": f"/mnt/{local_dataset}",
-                "credentials": credential["id"],
-                "attributes": {
-                    "folder": "",
-                },
-            }
-
-
-@pytest.mark.parametrize("param,value", [
-    ("pre_script", "rm -rf /"),
-    ("post_script", "rm -rf /"),
-])
-def test_cloud_backup(unprivileged_client, cloudsync_template, param, value):
-    with pytest.raises(ValidationErrors) as ve:
-        unprivileged_client.call("cloud_backup.create", {
-            **cloudsync_template,
-            "password": "test",
-            "keep_last": 10,
-            param: value,
-        })
-
-    assert any(error.attribute == f"cloud_backup_create.{param}" for error in ve.value.errors), ve
-
-
-@pytest.mark.parametrize("param,value", [
-    ("pre_script", "rm -rf /"),
-    ("post_script", "rm -rf /"),
-])
-def test_cloud_sync(unprivileged_client, cloudsync_template, param, value):
-    with pytest.raises(ValidationErrors) as ve:
-        unprivileged_client.call("cloudsync.create", {
-            **cloudsync_template,
-            "direction": "PUSH",
-            "transfer_mode": "COPY",
-            param: value,
-        })
-
-    assert any(error.attribute == f"cloud_sync_create.{param}" for error in ve.value.errors), ve
diff --git a/tests/api2/test_account_privilege_role_private_fields.py b/tests/api2/test_account_privilege_role_private_fields.py
deleted file mode 100644
index 91df0f1f3b477..0000000000000
--- a/tests/api2/test_account_privilege_role_private_fields.py
+++ /dev/null
@@ -1,236 +0,0 @@
-import contextlib
-
-import pytest
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.api_key import api_key
-from middlewared.test.integration.assets.cloud_backup import task as cloud_backup_task
-from middlewared.test.integration.assets.cloud_sync import local_ftp_credential, local_ftp_task
-from middlewared.test.integration.assets.crypto import root_certificate_authority
-from middlewared.test.integration.assets.datastore import row
-from middlewared.test.integration.assets.keychain import ssh_keypair
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, client, mock
-
-REDACTED = "********"
-
-
-@pytest.fixture(scope="module")
-def readonly_client():
-    with unprivileged_user_client(["READONLY_ADMIN"]) as c:
-        yield c
-
-
-@contextlib.contextmanager
-def wrap(id):
-    yield id
-
-
-@contextlib.contextmanager
-def certificateauthority():
-    with root_certificate_authority("ca_test") as ca:
-        yield ca["id"]
-
-
-@contextlib.contextmanager
-def cloudbackup():
-    with local_ftp_credential() as credential:
-        with dataset("cloud_backup") as local_dataset:
-            with mock("cloud_backup.ensure_initialized", return_value=None):
-                with cloud_backup_task({
-                    "path": f"/mnt/{local_dataset}",
-                    "credentials": credential["id"],
-                    "attributes": {
-                        "folder": "",
-                    },
-                    "password": "test",
-                }) as task:
-                    yield task["id"]
-
-
-@contextlib.contextmanager
-def cloudsync_credential():
-    with local_ftp_credential() as credential:
-        yield credential["id"]
-
-
-@contextlib.contextmanager
-def cloudsync():
-    with local_ftp_task() as task:
-        yield task["id"]
-
-
-@contextlib.contextmanager
-def disk():
-    disks = call("disk.query")
-    yield disks[0]["identifier"]
-
-
-@contextlib.contextmanager
-def dns_authenticator():
-    with row(
-        "system.acmednsauthenticator",
-        {
-            "name": "test",
-            "attributes": {
-                "authenticator": "cloudflare",
-                "cloudflare_email": "test@gmail.com",
-                "api_key": "key",
-                "api_token": "token",
-            },
-        },
-    ) as id:
-        yield id
-
-
-@contextlib.contextmanager
-def idmap():
-    with row(
-        "directoryservice.idmap_domain",
-        {
-            "name": "test",
-            "dns_domain_name": "test",
-            "range_low": 1000,
-            "range_high": 1001,
-            "idmap_backend": "LDAP",
-            "options": {
-                "ldap_base_dn": "cn=BASEDN",
-                "ldap_user_dn": "cn=USERDN",
-                "ldap_url": "ldap://127.0.0.1",
-                "ldap_user_dn_password": "password"
-            },
-        },
-        {"prefix": "idmap_domain_"},
-    ) as id:
-        yield id
-
-
-@contextlib.contextmanager
-def vm_device():
-    with row(
-        "vm.vm",
-        {
-            "id": 5,
-            "name": "",
-            "memory": 225
-        }):
-        with row(
-            "vm.device",
-            {
-                "id": 7,
-                "vm": 5,
-                "attributes": {
-                    "dtype": "DISPLAY",
-                    "bind": "127.0.0.1",
-                    "port": 1,
-                    "web_port": 1,
-                    "password": "pass",
-                }
-            }
-        ) as id:
-            yield id
-
-
-@contextlib.contextmanager
-def iscsi_auth():
-    auth = call("iscsi.auth.create", {
-        "tag": 1,
-        "user": "test",
-        "secret": "secretsecret",
-        "peeruser": "peeruser",
-        "peersecret": "peersecretsecret",
-    })
-    try:
-        yield auth["id"]
-    finally:
-        call("iscsi.auth.delete", auth["id"])
-
-
-@contextlib.contextmanager
-def keychaincredential():
-    with ssh_keypair() as k:
-        yield k["id"]
-
-
-@contextlib.contextmanager
-def vmware():
-    with row(
-        "storage.vmwareplugin",
-        {
-            "password": "password",
-        },
-    ) as id:
-        yield id
-
-
-@pytest.mark.parametrize("how", ["multiple", "single", "get_instance"])
-@pytest.mark.parametrize("service,id,options,redacted_fields", (
-    ("acme.dns.authenticator", dns_authenticator, {}, ["attributes"]),
-    ("certificate", 1, {}, ["privatekey", "issuer"]),
-    ("certificateauthority", certificateauthority, {}, ["privatekey", "issuer"]),
-    ("cloud_backup", cloudbackup, {}, ["credentials.provider", "password"]),
-    ("cloudsync.credentials", cloudsync_credential, {}, ["provider.pass"]),
-    ("cloudsync", cloudsync, {}, ["credentials.provider", "encryption_password"]),
-    ("disk", disk, {"extra": {"passwords": True}}, ["passwd"]),
-    ("idmap", idmap, {}, ["options.ldap_user_dn_password"]),
-    ("iscsi.auth", iscsi_auth, {}, ["secret", "peersecret"]),
-    ("keychaincredential", keychaincredential, {}, ["attributes"]),
-    ("user", 1, {}, ["unixhash", "smbhash"]),
-    ("vmware", vmware, {}, ["password"]),
-    ("vm.device", vm_device, {}, ["attributes.password"]),
-))
-def test_crud(readonly_client, how, service, id, options, redacted_fields):
-    identifier = "id" if service != "disk" else "identifier"
-
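-    # "id" is either a literal row id or a contextmanager factory that creates the row under test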
-    with (id() if callable(id) else wrap(id)) as id:
-        if how == "multiple":
-            result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], options)[0]
-        elif how == "single":
-            result = readonly_client.call(f"{service}.query", [[identifier, "=", id]], {**options, "get": True})
-        elif how == "get_instance":
-            result = readonly_client.call(f"{service}.get_instance", id, options)
-        else:
-            assert False
-
-        for k in redacted_fields:
-            obj = result
-            for path in k.split("."):
-                obj = obj[path]
-
-            assert obj == REDACTED, (k, obj, REDACTED)
-
-
-@pytest.mark.parametrize("service,redacted_fields", (
-    ("system.general", ["ui_certificate"]),
-    ("ldap", ["bindpw"]),
-    ("mail", ["pass", "oauth"]),
-    ("snmp", ["v3_password", "v3_privpassphrase"]),
-    ("truecommand", ["api_key"]),
-))
-def test_config(readonly_client, service, redacted_fields):
-    result = readonly_client.call(f"{service}.config")
-
-    for k in redacted_fields:
-        assert result[k] == REDACTED
-
-
-def test_fields_are_visible_if_has_write_access():
-    with unprivileged_user_client(["ACCOUNT_WRITE"]) as c:
-        result = c.call("user.get_instance", 1)
-
-    assert result["unixhash"] != REDACTED
-
-
-def test_fields_are_visible_for_api_key():
-    with api_key() as key:
-        with client(auth=None) as c:
-            assert c.call("auth.login_with_api_key", key)
-            result = c.call("user.get_instance", 1)
-
-    assert result["unixhash"] != REDACTED
-
-
-def test_vm_display_device(readonly_client):
-    with vm_device():
-        result = readonly_client.call("vm.get_display_devices", 5)
-        assert result[0]["attributes"]["password"] == REDACTED
diff --git a/tests/api2/test_account_query_roles.py b/tests/api2/test_account_query_roles.py
deleted file mode 100644
index e1321031ae3f8..0000000000000
--- a/tests/api2/test_account_query_roles.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-
-
-@pytest.mark.parametrize("role", ["READONLY_ADMIN", "FULL_ADMIN"])
-def test_user_role_in_account(role):
-    with unprivileged_user_client(roles=[role]) as c:
-        this_user = c.call("user.query", [["username", "=", c.username]], {"get": True})
-
-        assert this_user['roles'] == [role]
-
-
-def test_user_role_full_admin_map():
-    with unprivileged_user_client(allowlist=[{"method": "*", "resource": "*"}]) as c:
-        this_user = c.call("user.query", [["username", "=", c.username]], {"get": True})
-
-        assert "FULL_ADMIN" in this_user["roles"]
-        assert "HAS_ALLOW_LIST" in this_user["roles"]
diff --git a/tests/api2/test_account_root_password.py b/tests/api2/test_account_root_password.py
deleted file mode 100644
index 8811a8c760971..0000000000000
--- a/tests/api2/test_account_root_password.py
+++ /dev/null
@@ -1,61 +0,0 @@
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.utils import call, client
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-
-
-def test_root_password_disabled():
-    with client() as c:
-        root_user_id = c.call(
-            "datastore.query",
-            "account.bsdusers",
-            [["username", "=", "root"]],
-            {"get": True, "prefix": "bsdusr_"},
-        )["id"]
-
-        c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": True})
-        c.call("etc.generate", "user")
-        try:
-            alerts = c.call("alert.list")
-            assert any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts
-
-            builtin_administrators_group_id = c.call(
-                "datastore.query",
-                "account.bsdgroups",
-                [["group", "=", "builtin_administrators"]],
-                {"get": True, "prefix": "bsdgrp_"},
-            )["id"]
-
-            with dataset(f"admin_homedir") as homedir:
-                events = []
-
-                def callback(type, **message):
-                    events.append((type, message))
-
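-                # this event should fire once a password-enabled admin exists and root web UI login is disabled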
-                c.subscribe("user.web_ui_login_disabled", callback, sync=True)
-
-                with user({
-                    "username": "admin",
-                    "full_name": "Admin",
-                    "group_create": True,
-                    "groups": [builtin_administrators_group_id],
-                    "home": f"/mnt/{homedir}",
-                    "password": "test1234",
-                }, get_instance=False):
-                    alerts = c.call("alert.list")
-                    assert not any(alert["klass"] == "WebUiRootLogin" for alert in alerts), alerts
-
-                    # Root should not be able to log in with password anymore
-                    with pytest.raises(CallError):
-                        call("system.info", client_kwargs=dict(auth_required=False))
-
-                    assert events[0][1]["fields"]["usernames"] == ["admin"]
-
-                    c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False})
-                    c.call("etc.generate", "user")
-        finally:
-            # In case of a failure
-            c.call("datastore.update", "account.bsdusers", root_user_id, {"bsdusr_password_disabled": False})
-            c.call("etc.generate", "user")
diff --git a/tests/api2/test_account_shell_choices.py b/tests/api2/test_account_shell_choices.py
deleted file mode 100644
index 29ede61eae80a..0000000000000
--- a/tests/api2/test_account_shell_choices.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import group, user
-from middlewared.test.integration.utils import call
-
-
-def test_shell_choices_has_no_privileges():
-    with group({
-        "name": "test_no_privileges",
-    }) as g:
-        assert "/usr/bin/cli" not in call("user.shell_choices", [g["id"]])
-
-
-def test_shell_choices_has_privileges():
-    with group({
-        "name": "test_has_privileges",
-    }) as g:
-        privilege = call("privilege.create", {
-            "name": "Test",
-            "local_groups": [g["gid"]],
-            "ds_groups": [],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        })
-        try:
-            assert "/usr/bin/cli" in call("user.shell_choices", [g["id"]])
-        finally:
-            call("privilege.delete", privilege["id"])
-
-
-@pytest.mark.parametrize("group_payload", [
-    lambda g: {"group": g["id"]},
-    lambda g: {"group_create": True, "groups": [g["id"]]},
-])
-def test_cant_create_user_with_cli_shell_without_privileges(group_payload):
-    with group({
-        "name": "test_no_privileges",
-    }) as g:
-        with pytest.raises(ValidationErrors) as ve:
-            with user({
-                "username": "test",
-                "full_name": "Test",
-                "home": f"/nonexistent",
-                "password": "test1234",
-                "shell": "/usr/bin/cli",
-                **group_payload(g),
-            }):
-                pass
-
-        assert ve.value.errors[0].attribute == "user_create.shell"
-
-
-@pytest.mark.parametrize("group_payload", [
-    lambda g: {"group": g["id"]},
-    lambda g: {"group_create": True, "groups": [g["id"]]},
-])
-def test_can_create_user_with_cli_shell_with_privileges(group_payload):
-    with group({
-        "name": "test_no_privileges",
-    }) as g:
-        privilege = call("privilege.create", {
-            "name": "Test",
-            "local_groups": [g["gid"]],
-            "ds_groups": [],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        })
-        try:
-            with user({
-                "username": "test",
-                "full_name": "Test",
-                "home": f"/nonexistent",
-                "password": "test1234",
-                "shell": "/usr/bin/cli",
-                **group_payload(g),
-            }):
-                pass
-        finally:
-            call("privilege.delete", privilege["id"])
-
-
-@pytest.mark.parametrize("group_payload", [
-    lambda g: {"group": g["id"]},
-    lambda g: {"groups": [g["id"]]},
-])
-def test_cant_update_user_with_cli_shell_without_privileges(group_payload):
-    with group({
-        "name": "test_no_privileges",
-    }) as g:
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "home": f"/nonexistent",
-            "password": "test1234",
-            "group_create": True,
-        }) as u:
-            with pytest.raises(ValidationErrors) as ve:
-                call("user.update", u["id"], {
-                    "shell": "/usr/bin/cli",
-                    **group_payload(g),
-                })
-
-            assert ve.value.errors[0].attribute == "user_update.shell"
-
-
-@pytest.mark.parametrize("group_payload", [
-    lambda g: {"group": g["id"]},
-    lambda g: {"groups": [g["id"]]},
-])
-def test_can_update_user_with_cli_shell_with_privileges(group_payload):
-    with group({
-        "name": "test_no_privileges",
-    }) as g:
-        privilege = call("privilege.create", {
-            "name": "Test",
-            "local_groups": [g["gid"]],
-            "ds_groups": [],
-            "allowlist": [{"method": "CALL", "resource": "system.info"}],
-            "web_shell": False,
-        })
-        try:
-            with user({
-                "username": "test",
-                "full_name": "Test",
-                "home": f"/nonexistent",
-                "password": "test1234",
-                "group_create": True,
-            }) as u:
-                call("user.update", u["id"], {
-                    "shell": "/usr/bin/cli",
-                    **group_payload(g),
-                })
-        finally:
-            call("privilege.delete", privilege["id"])
diff --git a/tests/api2/test_account_ssh_key.py b/tests/api2/test_account_ssh_key.py
deleted file mode 100644
index c55698e58331a..0000000000000
--- a/tests/api2/test_account_ssh_key.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_account_create_update_ssh_key_in_existing_dir():
-    with dataset("home") as ds:
-        homedir = f"/mnt/{ds}"
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "home": homedir,
-            "password": "test1234",
-            "group_create": True,
-            "sshpubkey": "old",
-        }) as u:
-            call("user.delete", u["id"])
-
-            with user({
-                "username": "test",
-                "full_name": "Test",
-                "home": homedir,
-                "password": "test1234",
-                "group_create": True,
-                "sshpubkey": "new",
-            }) as u:
-                u = call("user.get_instance", u["id"])
-                assert u["sshpubkey"] == "new"
-
-
-def test_account_update_ssh_key_and_set_homedir():
-    with dataset("home") as ds:
-        homedir = f"/mnt/{ds}"
-
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "password": "test1234",
-            "group_create": True,
-        }) as u:
-            call("user.update", u["id"], {
-                "home": homedir,
-                "sshpubkey": "new",
-            })
-
-            u = call("user.get_instance", u["id"])
-            assert u["sshpubkey"] == "new"
-
-
-def test_account_sets_ssh_key_on_user_create():
-    with dataset("home") as ds:
-        homedir = f"/mnt/{ds}"
-
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "home": homedir,
-            "password": "test1234",
-            "group_create": True,
-            "sshpubkey": "old",
-        }):
-            assert ssh(f"cat {homedir}/test/.ssh/authorized_keys") == "old\n"
-
-
-def test_account_delete_ssh_key_on_user_delete():
-    with dataset("home") as ds:
-        homedir = f"/mnt/{ds}"
-
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "home": homedir,
-            "password": "test1234",
-            "group_create": True,
-            "sshpubkey": "old",
-        }) as u:
-            call("user.delete", u["id"])
-
-            assert ssh(f"cat {homedir}/test/.ssh/authorized_keys", check=False) == ""
diff --git a/tests/api2/test_acl_by_who.py b/tests/api2/test_acl_by_who.py
deleted file mode 100644
index 04dd2dca9fff1..0000000000000
--- a/tests/api2/test_acl_by_who.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from copy import deepcopy
-import os
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from truenas_api_client import ValidationErrors as ClientValidationErrors
-
-permset_posix_full = {"READ": True, "WRITE": True, "EXECUTE": True}
-permset_nfsv4_full = {"BASIC": "FULL_CONTROL"}
-flagset_nfsv4_inherit = {"BASIC": "INHERIT"}
-
-
-@pytest.fixture(scope='module')
-def posix_acl_dataset():
-    with dataset('posix') as ds:
-        yield ds
-
-
-@pytest.fixture(scope='module')
-def nfsv4_acl_dataset():
-    with dataset('nfs4', data={'share_type': 'SMB'}) as ds:
-        yield ds
-
-
-def test__posix_by_who(posix_acl_dataset):
-    target = os.path.join('/mnt', posix_acl_dataset)
-    the_acl = call('filesystem.getacl', target)['acl']
-    the_acl.extend([
-        {'tag': 'MASK', 'id': -1, 'perms': permset_posix_full, 'default': False},
-        {'tag': 'USER', 'who': 'root', 'perms': permset_posix_full, 'default': False},
-        {'tag': 'GROUP', 'who': 'root', 'perms': permset_posix_full, 'default': False},
-    ])
-
-    call('filesystem.setacl', {'path': target, 'dacl': the_acl}, job=True)
-
-    new_acl = call('filesystem.getacl', target)['acl']
-    saw_user = False
-    saw_group = False
-    for entry in new_acl:
-        if entry['tag'] == 'USER':
-            assert entry['id'] == 0
-            assert entry['perms'] == permset_posix_full
-            saw_user = True
-        elif entry['tag'] == 'GROUP':
-            assert entry['id'] == 0
-            assert entry['perms'] == permset_posix_full
-            saw_group = True
-
-    assert saw_user, str(new_acl)
-    assert saw_group, str(new_acl)
-
-
-def test__nfsv4_by_who(nfsv4_acl_dataset):
-    target = os.path.join('/mnt', nfsv4_acl_dataset)
-    the_acl = call('filesystem.getacl', target)['acl']
-    the_acl.extend([
-        {'tag': 'USER', 'who': 'root', 'perms': permset_nfsv4_full, 'flags': flagset_nfsv4_inherit, 'type': 'ALLOW'},
-        {'tag': 'GROUP', 'who': 'root', 'perms': permset_nfsv4_full, 'flags': flagset_nfsv4_inherit, 'type': 'ALLOW'},
-    ])
-
-    call('filesystem.setacl', {'path': target, 'dacl': the_acl}, job=True)
-
-    new_acl = call('filesystem.getacl', target)['acl']
-    saw_user = False
-    saw_group = False
-    for entry in new_acl:
-        if entry['tag'] == 'USER':
-            assert entry['id'] == 0
-            assert entry['perms'] == permset_nfsv4_full
-            saw_user = True
-        elif entry['tag'] == 'GROUP' and entry['id'] == 0:
-            assert entry['perms'] == permset_nfsv4_full
-            saw_group = True
-
-    assert saw_user, str(new_acl)
-    assert saw_group, str(new_acl)
-
-
-def test__acl_validation_errors_posix(posix_acl_dataset):
-    target = os.path.join('/mnt', posix_acl_dataset)
-    the_acl = call('filesystem.getacl', target)['acl']
-
-    new_acl = deepcopy(the_acl)
-    new_acl.extend([
-        {'tag': 'USER', 'perms': permset_posix_full, 'default': False},
-    ])
-
-    with pytest.raises(ClientValidationErrors):
-        call('filesystem.setacl', {'path': target, 'dacl': new_acl}, job=True)
-
-    new_acl = deepcopy(the_acl)
-    new_acl.extend([
-        {'tag': 'USER', 'perms': permset_posix_full, 'default': False, 'who': 'root', 'id': 0},
-    ])
-
-    with pytest.raises(ClientValidationErrors):
-        call('filesystem.setacl', {'path': target, 'dacl': new_acl}, job=True)
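Both `test__posix_by_who` and `test__nfsv4_by_who` check the same property: after setting entries by `who`, the returned ACL contains USER and GROUP entries resolved to uid/gid 0 with the expected permission set. The repeated saw_user/saw_group loops could be expressed as one predicate; this sketch assumes only the entry shape already visible above:

    def acl_contains(acl, tag, xid, perms):
        """Return True if any ACL entry matches the given tag, numeric id and permission set."""
        return any(
            entry['tag'] == tag and entry['id'] == xid and entry['perms'] == perms
            for entry in acl
        )

    # e.g. assert acl_contains(new_acl, 'USER', 0, permset_posix_full), str(new_acl)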
diff --git a/tests/api2/test_acltype.py b/tests/api2/test_acltype.py
deleted file mode 100644
index 81bc2f90dac08..0000000000000
--- a/tests/api2/test_acltype.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import pytest
-
-from auto_config import pool_name
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import dataset
-
-
-def query_filters(ds_name):
-    return [['id', '=', ds_name]], {'get': True, 'extra': {'retrieve_children': False}}
-
-
-@pytest.fixture(scope='module')
-def temp_ds():
-    with dataset('test1') as ds:
-        yield ds
-
-
-def test_default_acltype_on_zpool():
-    assert 'POSIXACL' in call('filesystem.statfs', f'/mnt/{pool_name}')['flags']
-
-
-def test_acltype_inheritance(temp_ds):
-    assert call('zfs.dataset.query', *query_filters(temp_ds))['properties']['acltype']['rawvalue'] == 'posix'
-
-
-@pytest.mark.parametrize(
-    'change,expected', [
-        (
-            {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'},
-            (('acltype', 'value', 'nfsv4'), ('aclmode', 'value', 'passthrough'), ('aclinherit', 'value', 'passthrough'))
-        ),
-        (
-            {'acltype': 'POSIX', 'aclmode': 'DISCARD'},
-            (('acltype', 'value', 'posix'), ('aclmode', 'value', 'discard'), ('aclinherit', 'value', 'discard'))
-        ),
-    ],
-    ids=['NFSV4_PASSTHROUGH', 'POSIX_DISCARD']
-)
-def test_change_acltype_and_aclmode_to_(temp_ds, change, expected):
-    call('pool.dataset.update', temp_ds, change)
-    props = call('zfs.dataset.query', *query_filters(temp_ds))['properties']
-    for tkey, skey, value in expected:
-        assert props[tkey][skey] == value, props[tkey][skey]
diff --git a/tests/api2/test_alert.py b/tests/api2/test_alert.py
deleted file mode 100644
index 8a79d39b4004e..0000000000000
--- a/tests/api2/test_alert.py
+++ /dev/null
@@ -1,74 +0,0 @@
-from time import sleep
-
-import pytest
-
-from auto_config import pool_name
-from middlewared.test.integration.utils import call, ssh
-
-
-ID_PATH = "/dev/disk/by-partuuid/"
-
-
-def get_alert_by_id(alert_id):
-    return next(filter(lambda alert: alert["id"] == alert_id, call("alert.list")), None)
-
-
-def wait_for_alert(timeout=120):
-    for _ in range(timeout):
-        for alert in call("alert.list"):
-            if (
-                alert["source"] == "VolumeStatus" and
-                alert["args"]["volume"] == pool_name and
-                alert["args"]["state"] == "DEGRADED"
-            ):
-                return alert["id"]
-        sleep(1)
-
-
-@pytest.fixture(scope="module")
-def degraded_pool_gptid():
-    get_pool = call("pool.query", [["name", "=", pool_name]], {"get": True})
-    gptid = get_pool["topology"]["data"][0]["path"].replace(ID_PATH, "")
-    ssh(f"zinject -d {gptid} -A fault {pool_name}")
-    return gptid
-
-
-@pytest.fixture(scope="module")
-def alert_id(degraded_pool_gptid):
-    call("alert.process_alerts")
-    result = wait_for_alert()
-    if result is None:
-        pytest.fail("Timed out while waiting for alert.")
-    return result
-
-
-def test_verify_the_pool_is_degraded(degraded_pool_gptid):
-    status = call("zpool.status", {"name": pool_name})
-    disk_status = status[pool_name]["data"][ID_PATH + degraded_pool_gptid]["disk_status"]
-    assert disk_status == "DEGRADED"
-
-
-def test_dismiss_alert(alert_id):
-    call("alert.dismiss", alert_id)
-    alert = get_alert_by_id(alert_id)
-    assert alert["dismissed"] is True, alert
-
-
-def test_restore_alert(alert_id):
-    call("alert.restore", alert_id)
-    alert = get_alert_by_id(alert_id)
-    assert alert["dismissed"] is False, alert
-
-
-def test_clear_the_pool_degradation(degraded_pool_gptid):
-    ssh(f"zpool clear {pool_name}")
-    status = call("zpool.status", {"name": pool_name})
-    disk_status = status[pool_name]["data"][ID_PATH + degraded_pool_gptid]["disk_status"]
-    assert disk_status != "DEGRADED"
-
-
-@pytest.mark.timeout(120)
-def test_wait_for_the_alert_to_disappear(alert_id):
-    call("alert.process_alerts")
-    while get_alert_by_id(alert_id) is not None:
-        sleep(1)
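Both `wait_for_alert` and `test_wait_for_the_alert_to_disappear` poll `alert.list` once per second until a condition holds. A shared polling helper, sketched here with a hypothetical name, would keep the timeout handling in one place:

    import pytest
    from time import sleep

    def wait_until(predicate, timeout=120, interval=1):
        """Poll `predicate` until it returns a truthy value; fail the test on timeout."""
        for _ in range(int(timeout / interval)):
            result = predicate()
            if result:
                return result
            sleep(interval)
        pytest.fail("Timed out while waiting for condition.")

    # e.g. wait_until(lambda: get_alert_by_id(alert_id) is None)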
diff --git a/tests/api2/test_alert_classes.py b/tests/api2/test_alert_classes.py
deleted file mode 100644
index d631a7b6d7b94..0000000000000
--- a/tests/api2/test_alert_classes.py
+++ /dev/null
@@ -1,59 +0,0 @@
-from unittest.mock import ANY
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-
-
-def test__normal_alert_class():
-    value = {
-        "classes": {
-            "UPSBatteryLow": {
-                "level": "CRITICAL",
-                "policy": "IMMEDIATELY",
-            },
-        },
-    }
-
-    call("alertclasses.update", value)
-
-    assert call("alertclasses.config") == {"id": ANY, **value}
-
-
-def test__nonexisting_alert_class():
-    with pytest.raises(ValidationErrors) as ve:
-        call("alertclasses.update", {
-            "classes": {
-                "Invalid": {
-                    "level": "WARNING",
-                },
-            },
-        })
-
-    assert ve.value.errors[0].attribute == "alert_class_update.classes.Invalid"
-
-
-def test__disable_proactive_support_for_valid_alert_class():
-    call("alertclasses.update", {
-        "classes": {
-            "ZpoolCapacityNotice": {
-                "proactive_support": False,
-            },
-        },
-    })
-
-
-def test__disable_proactive_support_for_invalid_alert_class():
-    with pytest.raises(ValidationErrors) as ve:
-        call("alertclasses.update", {
-            "classes": {
-                "UPSBatteryLow": {
-                    "proactive_support": False,
-                },
-            },
-        })
-
-    assert ve.value.errors[0].attribute == "alert_class_update.classes.UPSBatteryLow.proactive_support"
diff --git a/tests/api2/test_api_key.py b/tests/api2/test_api_key.py
deleted file mode 100644
index 9787c4575ab39..0000000000000
--- a/tests/api2/test_api_key.py
+++ /dev/null
@@ -1,258 +0,0 @@
-import errno
-import pytest
-
-from datetime import datetime, UTC
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.api_key import api_key
-from middlewared.test.integration.utils import call, client
-from time import sleep
-
-LEGACY_ENTRY_KEY = 'rtpz6u16l42XJJGy5KMJOVfkiQH7CyitaoplXy7TqFTmY7zHqaPXuA1ob07B9bcB'
-LEGACY_ENTRY_HASH = '$pbkdf2-sha256$29000$CyGktHYOwXgvBYDQOqc05g$nK1MMvVuPGHMvUENyR01qNsaZjgGmlt3k08CRuC4aTI'
-
-
-@pytest.fixture(scope='function')
-def sharing_admin_user(unprivileged_user_fixture):
-    privilege = call('privilege.query', [['local_groups.0.group', '=', unprivileged_user_fixture.group_name]])
-    assert len(privilege) > 0, 'Privilege not found'
-    call('privilege.update', privilege[0]['id'], {'roles': ['SHARING_ADMIN']})
-
-    try:
-        yield unprivileged_user_fixture
-    finally:
-        call('privilege.update', privilege[0]['id'], {'roles': []})
-
-
-def check_revoked_alert():
-    # reset any revoked alert
-    call('api_key.check_status')
-
-    for a in call('alert.list'):
-        if a['klass'] == 'ApiKeyRevoked':
-            return a
-
-    return None
-
-
-def test_user_unprivileged_api_key_failure(unprivileged_user_fixture):
-    """We should be able to call a method with root API key using Websocket."""
-    with pytest.raises(ValidationErrors) as ve:
-        with api_key(unprivileged_user_fixture.username):
-            pass
-
-    assert 'User lacks privilege role membership' in ve.value.errors[0].errmsg
-
-
-def test_api_key_nonexistent_username():
-    """Non-existent user should raise a validation error."""
-    with pytest.raises(ValidationErrors) as ve:
-        with api_key('canary'):
-            pass
-
-    assert 'User does not exist' in ve.value.errors[0].errmsg
-
-
-def test_expired_api_key_update_failure():
-    with pytest.raises(ValidationErrors) as ve:
-        with api_key():
-            key = call('api_key.query', [], {'get': True})
-            expiry = datetime.fromtimestamp(1, UTC)
-            call('api_key.update', key['id'], {'expires_at': expiry})
-
-    assert 'Expiration date is in the past' in ve.value.errors[0].errmsg
-
-
-def test_api_key_info(sharing_admin_user):
-    with api_key(sharing_admin_user.username):
-        key_info = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})
-        assert key_info['revoked'] is False
-        assert key_info['expires_at'] is None
-        assert key_info['local'] is True
-
-        user = call('user.query', [['username', '=', sharing_admin_user.username]], {'get': True})
-        assert user['api_keys'] == [key_info['id']]
-
-
-@pytest.mark.parametrize('endpoint', ['LEGACY', 'CURRENT'])
-def test_api_key_session(sharing_admin_user, endpoint):
-    with api_key(sharing_admin_user.username) as key:
-        with client(auth=None) as c:
-            match endpoint:
-                case 'LEGACY':
-                    assert c.call('auth.login_with_api_key', key)
-                case 'CURRENT':
-                    resp = c.call('auth.login_ex', {
-                        'mechanism': 'API_KEY_PLAIN',
-                        'username': sharing_admin_user.username,
-                        'api_key': key
-                    })
-                    assert resp['response_type'] == 'SUCCESS'
-                case _:
-                    raise ValueError(f'{endpoint}: unknown endpoint')
-
-            session = c.call('auth.sessions', [['current', '=', True]], {'get': True})
-            assert session['credentials'] == 'API_KEY'
-            assert session['credentials_data']['api_key']['name'] == 'Test API Key'
-
-            me = c.call('auth.me')
-            assert me['pw_name'] == sharing_admin_user.username
-            assert 'SHARING_ADMIN' in me['privilege']['roles']
-            assert 'API_KEY' in me['account_attributes']
-
-            call("auth.terminate_session", session['id'])
-
-            with pytest.raises(Exception):
-                c.call('system.info')
-
-
-def test_legacy_api_key_upgrade():
-    """We should automatically upgrade old hashes on successful login"""
-    with api_key():
-        key_id = call('api_key.query', [['username', '=', 'root']], {'get': True})['id']
-        call('datastore.update', 'account.api_key', key_id, {
-            'key': LEGACY_ENTRY_HASH,
-            'user_identifier': 'LEGACY_API_KEY'
-        })
-        call('etc.generate', 'pam_middleware')
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': 'root',
-                'api_key': f'{key_id}-{LEGACY_ENTRY_KEY}'
-            })
-            assert resp['response_type'] == 'SUCCESS'
-
-            # We should have replaced hash on auth
-            updated = call('api_key.query', [['username', '=', 'root']], {'get': True})
-            assert updated['keyhash'] != LEGACY_ENTRY_HASH
-            assert updated['keyhash'].startswith('$pbkdf2-sha512')
-
-        # verify we still have access
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': 'root',
-                'api_key': f'{key_id}-{LEGACY_ENTRY_KEY}'
-            })
-            assert resp['response_type'] == 'SUCCESS'
-
-
-def test_legacy_api_key_reject_nonroot(sharing_admin_user):
-    """Old hash style should be rejected for non-root user."""
-    with api_key(sharing_admin_user.username):
-        key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-        call('datastore.update', 'account.api_key', key_id, {'key': LEGACY_ENTRY_HASH})
-        call('etc.generate', 'pam_middleware')
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': LEGACY_ENTRY_KEY
-            })
-            assert resp['response_type'] == 'AUTH_ERR'
-
-
-def test_api_key_expired(sharing_admin_user):
-    """Expired keys should fail with expected response type"""
-    with api_key(sharing_admin_user.username) as key:
-        key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-        call('datastore.update', 'account.api_key', key_id, {'expiry': 1})
-
-        # update our pam_tdb file with new expiration
-        call('etc.generate', 'pam_middleware')
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': key
-            })
-            assert resp['response_type'] == 'EXPIRED'
-
-
-def test_key_revoked(sharing_admin_user):
-    """Revoked key should raise an AUTH_ERR"""
-    with api_key(sharing_admin_user.username) as key:
-        key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-        call('datastore.update', 'account.api_key', key_id, {'expiry': -1})
-
-        # update our pam_tdb file with revocation
-        call('etc.generate', 'pam_middleware')
-
-        revoked = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['revoked']
-        assert revoked is True
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': key
-            })
-            assert resp['response_type'] == 'AUTH_ERR'
-
-        assert check_revoked_alert() is not None
-        call('datastore.update', 'account.api_key', key_id, {'expiry': 0})
-        sleep(1)
-        alert = check_revoked_alert()
-        assert alert is None, str(alert)
-
-
-def test_api_key_reset(sharing_admin_user):
-    with api_key(sharing_admin_user.username) as key:
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': key
-            })
-            assert resp['response_type'] == 'SUCCESS'
-
-        key_id = call('api_key.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-        updated = call("api_key.update", key_id, {"reset": True})
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': key
-            })
-            assert resp['response_type'] == 'AUTH_ERR'
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'API_KEY_PLAIN',
-                'username': sharing_admin_user.username,
-                'api_key': updated['key']
-            })
-            assert resp['response_type'] == 'SUCCESS'
-
-
-def test_api_key_crud_restricted_admin_own_keys(sharing_admin_user):
-    with client(auth=(sharing_admin_user.username, sharing_admin_user.password)) as c:
-        key_info = c.call('api_key.create', {
-            'username': sharing_admin_user.username,
-            'name': 'test_restricted_admin_key',
-        })
-
-        try:
-            updated = c.call('api_key.update', key_info['id'], {
-                'name': 'test_restricted_admin_key_new'
-            })
-            assert 'key' not in updated
-            updated = c.call('api_key.update', key_info['id'], {'reset': True})
-            assert updated['key'] != '********'
-        finally:
-            c.call('api_key.delete', key_info['id'])
-
-
-def test_api_key_restrict_admin_other_keys_fail(sharing_admin_user):
-    with client(auth=(sharing_admin_user.username, sharing_admin_user.password)) as c:
-        with pytest.raises(CallError) as ce:
-            c.call('api_key.create', {
-                'username': 'root',
-                'name': 'test_restricted_admin_key',
-            })
-
-        assert ce.value.errno == errno.EACCES
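The legacy-upgrade test above logs in with a plain key of the form `f'{key_id}-{LEGACY_ENTRY_KEY}'`, i.e. the datastore id and the secret joined by a dash. For readers unfamiliar with that format, a minimal sketch of how such a string splits back into its parts (the helper is illustrative, not part of the API) is:

    def split_legacy_api_key(plain_key: str) -> tuple[int, str]:
        """Split an '<id>-<secret>' legacy API key string into its numeric id and secret."""
        key_id, _, secret = plain_key.partition('-')
        return int(key_id), secret

    # e.g. split_legacy_api_key(f'{key_id}-{LEGACY_ENTRY_KEY}') -> (key_id, LEGACY_ENTRY_KEY)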
diff --git a/tests/api2/test_apps.py b/tests/api2/test_apps.py
deleted file mode 100644
index 6edf0602978e9..0000000000000
--- a/tests/api2/test_apps.py
+++ /dev/null
@@ -1,269 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, client
-from middlewared.test.integration.assets.apps import app
-from middlewared.test.integration.assets.docker import docker
-from middlewared.test.integration.assets.pool import another_pool
-from truenas_api_client import ValidationErrors
-
-
-CUSTOM_CONFIG = {
-    'services': {
-        'actual_budget': {
-            'user': '568:568',
-            'image': 'actualbudget/actual-server:24.10.1',
-            'restart': 'unless-stopped',
-            'deploy': {
-                'resources': {
-                    'limits': {
-                        'cpus': '2',
-                        'memory': '4096M'
-                    }
-                }
-            },
-            'devices': [],
-            'depends_on': {
-                'permissions': {
-                    'condition': 'service_completed_successfully'
-                }
-            },
-            'cap_drop': ['ALL'],
-            'security_opt': ['no-new-privileges'],
-            'healthcheck': {
-                'interval': '10s',
-                'retries': 30,
-                'start_period': '10s',
-                'test': (
-                    "/bin/bash -c 'exec {health_check_fd}< /dev/tcp/127.0.0.1/31012 "
-                    "&& echo -e 'GET /health HTTP/1.1\\r\\nHost: 127.0.0.1\\r\\n"
-                    "Connection: close\\r\\n\\r\\n' >&$$health_check_fd && "
-                    "cat <&$$health_check_fd'"
-                ),
-                'timeout': '5s'
-            },
-            'environment': {
-                'ACTUAL_HOSTNAME': '0.0.0.0',
-                'ACTUAL_PORT': '31012',
-                'ACTUAL_SERVER_FILES': '/data/server-files',
-                'ACTUAL_USER_FILES': '/data/user-files',
-                'GID': '568',
-                'GROUP_ID': '568',
-                'NODE_ENV': 'production',
-                'PGID': '568',
-                'PUID': '568',
-                'TZ': 'Etc/UTC',
-                'UID': '568',
-                'USER_ID': '568'
-            },
-            'ports': [
-                {
-                    'host_ip': '0.0.0.0',
-                    'mode': 'ingress',
-                    'protocol': 'tcp',
-                    'published': 31012,
-                    'target': 31012
-                }
-            ]
-        },
-        'permissions': {
-            'command': [
-                '''
-                function process_dir() {
-                    local dir=$$1
-                    local mode=$$2
-                    local uid=$$3
-                    local gid=$$4
-                    local chmod=$$5
-                    local is_temporary=$$6
-                    # Process directory logic here...
-                }
-                process_dir /mnt/actual_budget/config check 568 568 false false
-                '''
-            ],
-            'deploy': {
-                'resources': {
-                    'limits': {
-                        'cpus': '1.0',
-                        'memory': '512m'
-                    }
-                }
-            },
-            'entrypoint': ['bash', '-c'],
-            'image': 'bash',
-            'user': 'root'
-        }
-    },
-    'x-portals': [
-        {
-            'host': '0.0.0.0',
-            'name': 'Web UI',
-            'path': '/',
-            'port': 31012,
-            'scheme': 'http'
-        }
-    ],
-    'x-notes': '''# Welcome to TrueNAS SCALE
-
-    Thank you for installing Actual Budget!
-
-    ## Documentation
-    Documentation for Actual Budget can be found at https://www.truenas.com/docs.
-
-    ## Bug reports
-    If you find a bug in this app, please file an issue at
-    https://ixsystems.atlassian.net or https://github.com/truenas/apps.
-
-    ## Feature requests or improvements
-    If you find a feature request for this app, please file an issue at
-    https://ixsystems.atlassian.net or https://github.com/truenas/apps.
-    '''
-}
-
-INVALID_YAML = '''
-services:
-  actual_budget
-    user: 568:568
-    image: actualbudget/actual-server:24.10.1
-    restart: unless-stopped
-    deploy:
-      resources: {'limits': {'cpus': '2', 'memory': '4096M'}}
-    devices: []
-    depends_on:
-      permissions:
-        condition: service_completed_successfully
-    cap_drop: ['ALL']
-    security_opt: ['no-new-privileges']
-'''
-
-
-@pytest.fixture(scope='module')
-def docker_pool():
-    with another_pool() as pool:
-        with docker(pool) as docker_config:
-            yield docker_config
-
-
-def test_create_catalog_app(docker_pool):
-    with app('actual-budget', {
-        'train': 'community',
-        'catalog_app': 'actual-budget',
-    }) as app_info:
-        assert app_info['name'] == 'actual-budget', app_info
-        assert app_info['state'] == 'DEPLOYING', app_info
-        volume_ds = call('app.get_app_volume_ds', 'actual-budget')
-        assert volume_ds is not None, volume_ds
-
-
-def test_create_custom_app(docker_pool):
-    with app('custom-budget', {
-        'custom_app': True,
-        'custom_compose_config': CUSTOM_CONFIG,
-    }) as app_info:
-        assert app_info['name'] == 'custom-budget'
-        assert app_info['state'] == 'DEPLOYING'
-
-
-def test_create_custom_app_validation_error(docker_pool):
-    with pytest.raises(ValidationErrors):
-        with app('custom-budget', {
-            'custom_app': False,
-            'custom_compose_config': CUSTOM_CONFIG,
-        }):
-            pass
-
-
-def test_create_custom_app_invalid_yaml(docker_pool):
-    with pytest.raises(ValidationErrors):
-        with app('custom-budget', {
-            'custom_app': True,
-            'custom_compose_config': INVALID_YAML,
-        }):
-            pass
-
-
-def test_delete_app_validation_error_for_non_existent_app(docker_pool):
-    with pytest.raises(ValidationErrors):
-        call('app.delete', 'actual-budget', {'remove_ix_volumes': True, 'remove_images': True}, job=True)
-
-
-def test_delete_app_options(docker_pool):
-    with app(
-        'custom-budget',
-        {
-            'custom_app': True,
-            'custom_compose_config': CUSTOM_CONFIG,
-        },
-        {'remove_ix_volumes': True, 'remove_images': True}
-    ) as app_info:
-        assert app_info['name'] == 'custom-budget'
-        assert app_info['state'] == 'DEPLOYING'
-
-    app_images = call('app.image.query', [['repo_tags', '=', ['actualbudget/actual-server:24.10.1']]])
-    assert len(app_images) == 0
-    volume_ds = call('app.get_app_volume_ds', 'custom-budget')
-    assert volume_ds is None
-
-
-def test_update_app(docker_pool):
-    values = {
-        'values': {
-            'network': {
-                'web_port': 32000
-            },
-            'resources': {
-                'limits': {
-                    'memory': 8192
-                }
-            }
-        }
-    }
-    with app('actual-budget', {
-        'train': 'community',
-        'catalog_app': 'actual-budget',
-    }) as app_info:
-        app_info = call('app.update', app_info['name'], values, job=True)
-        assert app_info['active_workloads']['used_ports'][0]['host_ports'][0]['host_port'] == 32000
-
-
-def test_stop_start_app(docker_pool):
-    with app('actual-budget', {
-        'train': 'community',
-        'catalog_app': 'actual-budget'
-    }):
-        # stop running app
-        call('app.stop', 'actual-budget', job=True)
-        states = call('app.query', [], {'select': ['state']})[0]
-        assert states['state'] == 'STOPPED'
-
-        # start stopped app
-        call('app.start', 'actual-budget', job=True)
-        states = call('app.query', [], {'select': ['state']})[0]
-        assert states['state'] == 'DEPLOYING'
-
-
-def test_event_subscribe(docker_pool):
-    with client(py_exceptions=False) as c:
-        expected_event_type_order = ['ADDED', 'CHANGED']
-        expected_event_order = ['STOPPING', 'STOPPED', 'DEPLOYING']
-        events = []
-        event_types = []
-
-        def callback(event_type, **message):
-            nonlocal events, event_types
-            if not events or events[-1] != message['fields']['state']:
-                events.append(message['fields']['state'])
-            if not event_types or event_types[-1] != event_type:
-                event_types.append(event_type)
-
-        c.subscribe('app.query', callback, sync=True)
-
-        with app('ipfs', {
-            'train': 'community',
-            'catalog_app': 'ipfs'
-        }):
-            events = []
-            call('app.stop', 'ipfs', job=True)
-            call('app.start', 'ipfs', job=True)
-            assert expected_event_order == events
-
-        assert expected_event_type_order == event_types
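INVALID_YAML above is rejected because the `actual_budget` service key is missing its colon, so the document does not parse at all. A quick way to reproduce the same failure outside the API, assuming PyYAML is available for illustration, is:

    import yaml  # PyYAML; assumed available here for illustration only

    try:
        yaml.safe_load(INVALID_YAML)
    except yaml.YAMLError as exc:
        # Raises a scanner/parser error such as "mapping values are not allowed here",
        # which is why app.create is expected to fail validation for this payload.
        print(f'compose config rejected: {exc}')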
diff --git a/tests/api2/test_apps_images_roles.py b/tests/api2/test_apps_images_roles.py
deleted file mode 100644
index e32cd958ddd0d..0000000000000
--- a/tests/api2/test_apps_images_roles.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
-    ('app.image.query', 'APPS_READ', True, False),
-    ('app.image.query', 'APPS_WRITE', True, False),
-    ('app.image.query', 'DOCKER_READ', False, False),
-    ('app.image.pull', 'APPS_READ', False, False),
-    ('app.image.pull', 'APPS_WRITE', True, False),
-    ('app.image.delete', 'APPS_READ', False, False),
-    ('app.image.delete', 'APPS_WRITE', True, True),
-))
-def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
diff --git a/tests/api2/test_apps_roles.py b/tests/api2/test_apps_roles.py
deleted file mode 100644
index 2a4144e986b20..0000000000000
--- a/tests/api2/test_apps_roles.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
-    ('app.query', 'APPS_READ', True, False),
-    ('app.query', 'APPS_WRITE', True, False),
-    ('app.query', 'DOCKER_READ', False, False),
-    ('app.config', 'APPS_READ', True, True),
-    ('app.config', 'APPS_WRITE', True, True),
-    ('app.config', 'DOCKER_READ', False, False),
-    ('app.update', 'APPS_READ', False, False),
-    ('app.update', 'APPS_WRITE', True, True),
-    ('app.create', 'APPS_READ', False, False),
-    ('app.create', 'APPS_WRITE', True, True),
-    ('app.delete', 'APPS_READ', False, False),
-    ('app.delete', 'APPS_WRITE', True, True),
-    ('app.convert_to_custom', 'APPS_READ', False, False),
-    ('app.convert_to_custom', 'APPS_WRITE', True, True),
-))
-def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
diff --git a/tests/api2/test_attachment_querying.py b/tests/api2/test_attachment_querying.py
deleted file mode 100644
index 348af8f390a2e..0000000000000
--- a/tests/api2/test_attachment_querying.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-
-sys.path.append(os.getcwd())
-
-from middlewared.test.integration.assets.nfs import nfs_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-PARENT_DATASET = 'test_parent'
-CHILD_DATASET = f'{PARENT_DATASET}/child_dataset'
-
-
-def test_attachment_with_child_path():
-    with dataset(PARENT_DATASET) as parent_dataset:
-        parent_path = f'/mnt/{parent_dataset}'
-        assert call('pool.dataset.attachments_with_path', parent_path) == []
-
-        with nfs_share(parent_dataset):
-            attachments = call('pool.dataset.attachments_with_path', parent_path)
-            assert len(attachments) > 0, attachments
-            assert attachments[0]['type'] == 'NFS Share', attachments
-
-            with dataset(CHILD_DATASET) as child_dataset:
-                child_path = f'/mnt/{child_dataset}'
-                attachments = call('pool.dataset.attachments_with_path', child_path)
-                assert len(attachments) == 0, attachments
-
-                attachments = call('pool.dataset.attachments_with_path', child_path, True)
-                assert len(attachments) == 1, attachments
-                assert attachments[0]['type'] == 'NFS Share', attachments
diff --git a/tests/api2/test_audit_alerts.py b/tests/api2/test_audit_alerts.py
deleted file mode 100644
index ac1d64a9c9b85..0000000000000
--- a/tests/api2/test_audit_alerts.py
+++ /dev/null
@@ -1,123 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, ssh, mock
-from time import sleep
-
-
-@pytest.fixture(scope='function')
-def setup_state(request):
-    """
-    Parametrize the test setup
-    The hope was that both 'backend' and 'setup' one-shot tests would be similar, however
-    the 'setup' test ended up requiring 'with mock'
-    """
-    path = '/audit'
-    alert_key = request.param[0]
-    if alert_key is not None:
-        path += f"/{alert_key}.db"
-    alert_class = request.param[1]
-    restore_data = None
-    try:
-        # Remove any pre-existing alert cruft
-        call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key})
-
-        alerts = call("alert.list")
-        class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
-        assert len(class_alerts) == 0, class_alerts
-        match alert_class:
-            case 'AuditBackendSetup':
-                # A file in the dataset: set it immutable
-                ssh(f'chattr +i {path}')
-                lsattr = ssh(f'lsattr {path}')
-                assert lsattr[4] == 'i', lsattr
-                restore_data = path
-            case 'AuditDatasetCleanup':
-                # Directly tweak the zfs settings
-                call(
-                    "zfs.dataset.update",
-                    "boot-pool/ROOT/24.10.0-MASTER-20240709-021413/audit",
-                    {"properties": {"org.freenas:refquota_warning": {"parsed": "70"}}}
-                )
-            case _:
-                pass
-        yield request.param
-    finally:
-        match alert_class:
-            case 'AuditBackendSetup':
-                # Remove immutable flag from file
-                assert restore_data != ""
-                ssh(f'chattr -i {restore_data}')
-                lsattr = ssh(f'lsattr {restore_data}')
-                assert lsattr[4] == '-', lsattr
-                # Restore backend file descriptors and dismiss alerts
-                call('auditbackend.setup')
-            case 'AuditSetup':
-                # Dismiss alerts
-                call('audit.setup')
-            case _:
-                pass
-        # call('alert.oneshot_delete', alert_class, alert_key if alert_key is None else {'service': alert_key})
-        sleep(1)
-        alerts = call("alert.list")
-        class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
-        assert len(class_alerts) == 0, class_alerts
-
-
-@pytest.mark.parametrize(
-    'setup_state', [
-        ['SMB', 'AuditBackendSetup', 'auditbackend.setup'],
-    ],
-    indirect=True
-)
-def test_audit_backend_alert(setup_state):
-    db_path, alert_class, audit_method = setup_state
-    call(audit_method)
-    sleep(1)
-    alerts = call("alert.list")
-    class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
-    assert len(class_alerts) > 0, class_alerts
-    assert class_alerts[0]['klass'] == 'AuditBackendSetup', class_alerts
-    assert class_alerts[0]['args']['service'] == db_path, class_alerts
-    assert class_alerts[0]['formatted'].startswith("Audit service failed backend setup"), class_alerts
-
-
-@pytest.mark.parametrize(
-    'setup_state', [
-        [None, 'AuditSetup', 'audit.setup']
-    ],
-    indirect=True
-)
-def test_audit_setup_alert(setup_state):
-    with mock("audit.update_audit_dataset", """
-        from middlewared.service import private
-        @private
-        async def mock(self, new):
-            raise Exception()
-    """):
-        unused, alert_class, audit_method = setup_state
-        call(audit_method)
-        sleep(1)
-        alerts = call("alert.list")
-        class_alerts = [alert for alert in alerts if alert['klass'] == alert_class]
-        assert len(class_alerts) > 0, class_alerts
-        assert class_alerts[0]['klass'] == 'AuditSetup', class_alerts
-        assert class_alerts[0]['formatted'].startswith("Audit service failed to complete setup"), class_alerts
-
-
-def test_audit_health_monitor_alert():
-    with mock("auditbackend.query", """
-        from middlewared.service import private
-        from middlewared.schema import accepts, List, Dict, Str
-        @private
-        @accepts(
-            Str('db_name', required=True),
-            List('query-filters'),
-            Dict('query-options', additional_attrs=True)
-        )
-        async def mock(self, db_name, filters, options):
-            raise CallError('TEST_SERVICE: connection to audit database is not initialized.')
-    """):
-        alert = call("alert.run_source", "AuditServiceHealth")[0]
-        assert alert['source'] == 'AuditServiceHealth', f"Received source: {alert['source']}"
-        assert alert['text'].startswith("Failed to perform audit query"), f"Received text: {alert['text']}"
-        assert "connection to audit database is not initialized" in alert['args']['verrs'], f"Received args: {alert['args']}"
diff --git a/tests/api2/test_audit_api_key.py b/tests/api2/test_audit_api_key.py
deleted file mode 100644
index dcf4a30a533f1..0000000000000
--- a/tests/api2/test_audit_api_key.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import datetime
-
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-API_KEY_NAME = 'AUDIT_API_KEY'
-
-
-def test_api_key_audit():
-    payload = {'username': 'root', 'name': API_KEY_NAME}
-    payload2 = {'expires_at': None}
-    api_key_id = None
-
-    try:
-        with expect_audit_method_calls([{
-            'method': 'api_key.create',
-            'params': [payload],
-            'description': f'Create API key {API_KEY_NAME}',
-        }]):
-            api_key = call('api_key.create', payload)
-            api_key_id = api_key['id']
-
-            # Set expiration 60 minutes in future
-            payload2['expires_at'] = api_key['created_at'] + datetime.timedelta(minutes=60)
-
-        with expect_audit_method_calls([{
-            'method': 'api_key.update',
-            'params': [api_key_id, payload2],
-            'description': f'Update API key {API_KEY_NAME}',
-        }]):
-            call('api_key.update', api_key_id, payload2)
-
-    finally:
-        if api_key_id:
-            with expect_audit_method_calls([{
-                'method': 'api_key.delete',
-                'params': [api_key_id],
-                'description': f'Delete API key {API_KEY_NAME}',
-            }]):
-                call('api_key.delete', api_key_id)
diff --git a/tests/api2/test_audit_audit.py b/tests/api2/test_audit_audit.py
deleted file mode 100644
index d224c3f7081da..0000000000000
--- a/tests/api2/test_audit_audit.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import os
-
-import requests
-import time
-import operator
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call, url
-from middlewared.test.integration.utils.audit import expect_audit_log, expect_audit_method_calls
-from unittest.mock import ANY
-
-
-# =====================================================================
-#                     Fixtures and utilities
-# =====================================================================
-@pytest.fixture(scope='class')
-def report_exists(request):
-    report_pathname = request.config.cache.get('report_pathname', None)
-    assert report_pathname is not None
-    yield report_pathname
-
-
-# =====================================================================
-#                           Tests
-# =====================================================================
-@pytest.mark.parametrize('payload,success', [
-    ({'retention': 20}, True),
-    ({'retention': 0}, False)
-])
-def test_audit_config_audit(payload, success):
-    '''
-    Test the auditing of Audit configuration changes
-    '''
-    initial_audit_config = call('audit.config')
-    rest_operator = operator.eq if success else operator.ne
-    expected_log_template = {
-        'service_data': {
-            'vers': {
-                'major': 0,
-                'minor': 1,
-            },
-            'origin': ANY,
-            'protocol': 'WEBSOCKET',
-            'credentials': {
-                'credentials': 'LOGIN_PASSWORD',
-                'credentials_data': {'username': 'root', 'login_at': ANY},
-            },
-        },
-        'event': 'METHOD_CALL',
-        'event_data': {
-            'authenticated': True,
-            'authorized': True,
-            'method': 'audit.update',
-            'params': [payload],
-            'description': 'Update Audit Configuration',
-        },
-        'success': success
-    }
-    try:
-        with expect_audit_log([expected_log_template]):
-            if success:
-                call('audit.update', payload)
-            else:
-                with pytest.raises(ValidationErrors):
-                    call('audit.update', payload)
-    finally:
-        # Restore initial state
-        restore_payload = {
-            'retention': initial_audit_config['retention'],
-        }
-        call('audit.update', restore_payload)
-
-
-def test_audit_export_audit(request):
-    '''
-    Test the auditing of the audit export function
-    '''
-    payload = {
-        'export_format': 'CSV'
-    }
-    with expect_audit_method_calls([{
-        'method': 'audit.export',
-        'params': [payload],
-        'description': 'Export Audit Data',
-    }]):
-        report_pathname = call('audit.export', payload, job=True)
-        request.config.cache.set('report_pathname', report_pathname)
-
-
-class TestAuditDownload:
-    '''
-    Wrap these tests in a class for the 'report_exists' fixture
-    '''
-    def test_audit_download_audit(self, report_exists):
-        '''
-        Test the auditing of the audit download function
-        '''
-        report_pathname = report_exists
-        st = call('filesystem.stat', report_pathname)
-
-        init_audit_query = call('audit.query', {
-            'query-filters': [['event_data.method', '=', 'audit.download_report']],
-            'query-options': {'select': ['event_data', 'success']}
-        })
-        init_len = len(init_audit_query)
-
-        report_name = os.path.basename(report_pathname)
-        payload = {
-            'report_name': report_name
-        }
-        job_id, download_data = call(
-            'core.download', 'audit.download_report', [payload], 'report.csv'
-        )
-        r = requests.get(f'{url()}{download_data}')
-        r.raise_for_status()
-        assert len(r.content) == st['size']
-
-        post_audit_query = call('audit.query', {
-            'query-filters': [['event_data.method', '=', 'audit.download_report']],
-            'query-options': {'select': ['event_data', 'success']}
-        })
-        post_len = len(post_audit_query)
-
-        # This usually requires only one cycle
-        count_down = 10
-        while count_down > 0 and post_len == init_len:
-            time.sleep(1)
-            count_down -= 1
-            post_audit_query = call('audit.query', {
-                'query-filters': [['event_data.method', '=', 'audit.download_report']],
-                'query-options': {'select': ['event_data', 'success']}
-            })
-            post_len = len(post_audit_query)
-
-        assert post_len > init_len, 'Timed out waiting for the audit entry'
-
-        # Confirm this download is recorded
-        entry = post_audit_query[-1]
-        event_data = entry['event_data']
-        params = event_data['params'][0]
-        assert report_name in params['report_name']
diff --git a/tests/api2/test_audit_basic.py b/tests/api2/test_audit_basic.py
deleted file mode 100644
index 8752fe991350d..0000000000000
--- a/tests/api2/test_audit_basic.py
+++ /dev/null
@@ -1,375 +0,0 @@
-from middlewared.service_exception import ValidationError, CallError
-from middlewared.test.integration.assets.account import user, unprivileged_user_client
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, url
-from middlewared.test.integration.utils.audit import get_audit_entry
-
-from auto_config import ha
-from protocols import smb_connection
-from time import sleep
-
-import os
-import pytest
-import requests
-import secrets
-import string
-
-
-SMBUSER = 'audit-smb-user'
-PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-AUDIT_DATASET_CONFIG = {
-    # keyname : "audit"=audit only setting, "zfs"=zfs dataset setting, "ro"=read-only (not a setting)
-    'retention': 'audit',
-    'quota': 'zfs',
-    'reservation': 'zfs',
-    'quota_fill_warning': 'zfs',
-    'quota_fill_critical': 'zfs',
-    'remote_logging_enabled': 'other',
-    'space': 'ro'
-}
-MiB = 1024**2
-GiB = 1024**3
-
-
-# =====================================================================
-#                     Fixtures and utilities
-# =====================================================================
-class AUDIT_CONFIG():
-    defaults = {
-        'retention': 7,
-        'quota': 0,
-        'reservation': 0,
-        'quota_fill_warning': 75,
-        'quota_fill_critical': 95
-    }
-
-
-def get_zfs(data_type, key, zfs_config):
-    """ Get the equivalent ZFS value associated with the audit config setting """
-
-    types = {
-        'zfs': {
-            'reservation': zfs_config['properties']['refreservation']['parsed'] or 0,
-            'quota': zfs_config['properties']['refquota']['parsed'] or 0,  # audit quota == ZFS refquota
-            'refquota': zfs_config['properties']['refquota']['parsed'] or 0,
-            'quota_fill_warning': zfs_config['org.freenas:quota_warning'],
-            'quota_fill_critical': zfs_config['org.freenas:quota_critical']
-        },
-        'space': {
-            'used': zfs_config['properties']['used']['parsed'],
-            'used_by_snapshots': zfs_config['properties']['usedbysnapshots']['parsed'],
-            'available': zfs_config['properties']['available']['parsed'],
-            'used_by_dataset': zfs_config['properties']['usedbydataset']['parsed'],
-            # We set 'refreservation' and there is no 'usedbyreservation'
-            'used_by_reservation': zfs_config['properties']['usedbyrefreservation']['parsed']
-        }
-    }
-    return types[data_type][key]
-
-
-def check_audit_download(report_path, report_type, tag=None):
-    """ Download audit DB (root user)
-    If requested, assert the tag is present
-    INPUT: report_type ['CSV'|'JSON'|'YAML']
-    RETURN: lenght of content (bytes)
-    """
-    job_id, url_path = call(
-        "core.download", "audit.download_report",
-        [{"report_name": os.path.basename(report_path)}],
-        f"report.{report_type.lower()}"
-    )
-    r = requests.get(f"{url()}{url_path}")
-    r.raise_for_status()
-    if tag is not None:
-        assert f"{tag}" in r.text
-    return len(r.content)
-
-
-@pytest.fixture(scope='class')
-def initialize_for_smb_tests():
-    with dataset('audit-test-basic', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), 'AUDIT_BASIC_TEST', {
-            'purpose': 'NO_PRESET',
-            'guestok': False,
-            'audit': {'enable': True}
-        }) as s:
-            with user({
-                'username': SMBUSER,
-                'full_name': SMBUSER,
-                'group_create': True,
-                'password': PASSWD,
-                'smb': True
-            }) as u:
-                yield {'dataset': ds, 'share': s, 'user': u}
-
-
-@pytest.fixture(scope='class')
-def init_audit():
-    """ Provides the audit and dataset configs and cleans up afterward """
-    try:
-        dataset = call('audit.get_audit_dataset')
-        config = call('audit.config')
-        yield (config, dataset)
-    finally:
-        call('audit.update', AUDIT_CONFIG.defaults)
-
-
-@pytest.fixture(scope='class')
-def standby_audit_event():
-    """ HA system: Create an audit event on the standby node
-    Attempt to delete a built-in user on the standby node
-    """
-    event = "user.delete"
-    username = "backup"
-    user = call('user.query', [["username", "=", username]], {"select": ["id"], "get": True})
-    # Generate an audit entry on the remote node
-    with pytest.raises(CallError):
-        call('failover.call_remote', event, [user['id']])
-
-    yield {"event": event, "username": username}
-
-
-# =====================================================================
-#                           Tests
-# =====================================================================
-class TestAuditConfig:
-    def test_audit_config_defaults(self, init_audit):
-        (config, dataset) = init_audit
-
-        # Confirm existence of config entries
-        for key in AUDIT_DATASET_CONFIG:
-            assert key in config, str(config)
-
-        # Confirm audit default config settings
-        assert config['retention'] == AUDIT_CONFIG.defaults['retention']
-        assert config['quota'] == AUDIT_CONFIG.defaults['quota']
-        assert config['reservation'] == AUDIT_CONFIG.defaults['reservation']
-        assert config['quota_fill_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning']
-        assert config['quota_fill_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical']
-        assert config['remote_logging_enabled'] is False
-        for key in ['used', 'used_by_snapshots', 'used_by_dataset', 'used_by_reservation', 'available']:
-            assert key in config['space'], str(config['space'])
-
-        for service in ['MIDDLEWARE', 'SMB', 'SUDO']:
-            assert service in config['enabled_services']
-
-        # Confirm audit dataset settings
-        for key in [k for k in AUDIT_DATASET_CONFIG if AUDIT_DATASET_CONFIG[k] == 'zfs']:
-            assert get_zfs('zfs', key, dataset) == config[key], f"config[{key}] = {config[key]}"
-
-    def test_audit_config_dataset_defaults(self, init_audit):
-        """ Confirm Audit dataset uses Audit default settings """
-        (unused, ds_config) = init_audit
-        assert ds_config['org.freenas:refquota_warning'] == AUDIT_CONFIG.defaults['quota_fill_warning']
-        assert ds_config['org.freenas:refquota_critical'] == AUDIT_CONFIG.defaults['quota_fill_critical']
-
-    def test_audit_config_updates(self):
-        """
-        Validate that updating audit configuration values has the expected results.
-        """
-        new_config = call('audit.update', {'retention': 10})
-        assert new_config['retention'] == 10
-
-        # quota is in units of GiB
-        new_config = call('audit.update', {'quota': 1})
-        assert new_config['quota'] == 1
-        audit_dataset = call('audit.get_audit_dataset')
-
-        # ZFS value is in units of bytes.  Convert to GiB for comparison.
-        assert get_zfs('zfs', 'refquota', audit_dataset) // GiB == new_config['quota']
-
-        # Confirm ZFS and audit config are in sync
-        assert new_config['space']['available'] == get_zfs('space', 'available', audit_dataset)
-        assert new_config['space']['used_by_dataset'] == get_zfs('space', 'used', audit_dataset)
-
-        # Check that we're actually setting the quota by evaluating available space
-        # Change the quota to something more interesting
-        new_config = call('audit.update', {'quota': 2})
-        assert new_config['quota'] == 2
-
-        audit_dataset = call('audit.get_audit_dataset')
-        assert get_zfs('zfs', 'refquota', audit_dataset) == 2*GiB  # noqa (allow 2*GiB)
-
-        used_in_dataset = get_zfs('space', 'used_by_dataset', audit_dataset)
-        assert 2*GiB - new_config['space']['available'] == used_in_dataset  # noqa (allow 2*GiB)
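-        # Worked example of the relationship asserted above (illustrative
-        # numbers, assuming this dataset is the only consumer of its refquota):
-        # with refquota = 2 GiB and 512 MiB referenced by the dataset, ZFS
-        # reports available = 2 GiB - 512 MiB = 1.5 GiB, so
-        # refquota - available == used_by_dataset.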
-
-        new_config = call('audit.update', {'reservation': 1})
-        assert new_config['reservation'] == 1
-        assert new_config['space']['used_by_reservation'] != 0
-
-        new_config = call('audit.update', {
-            'quota_fill_warning': 70,
-            'quota_fill_critical': 80
-        })
-
-        assert new_config['quota_fill_warning'] == 70
-        assert new_config['quota_fill_critical'] == 80
-
-        # Test disable reservation
-        new_config = call('audit.update', {'reservation': 0})
-        assert new_config['reservation'] == 0
-
-        # Test disable quota
-        new_config = call('audit.update', {'quota': 0})
-        assert new_config['quota'] == 0
-
-
-class TestAuditOps:
-    def test_audit_query(self, initialize_for_smb_tests):
-        # If this test has been run more than once on this VM, then
-        # the audit DB _will_ record the creation.
-        # Let's get the starting count.
-        initial_ops_count = call('audit.query', {
-            'services': ['SMB'],
-            'query-filters': [['username', '=', SMBUSER]],
-            'query-options': {'count': True}
-        })
-
-        share = initialize_for_smb_tests['share']
-        with smb_connection(
-            share=share['name'],
-            username=SMBUSER,
-            password=PASSWD,
-        ) as c:
-            fd = c.create_file('testfile.txt', 'w')
-            for i in range(0, 3):
-                c.write(fd, b'foo')
-                c.read(fd, 0, 3)
-            c.close(fd, True)
-
-        retries = 2
-        ops_count = initial_ops_count
-        while retries > 0 and (ops_count - initial_ops_count) <= 0:
-            sleep(5)
-            ops_count = call('audit.query', {
-                'services': ['SMB'],
-                'query-filters': [['username', '=', SMBUSER]],
-                'query-options': {'count': True}
-            })
-            retries -= 1
-        assert ops_count > initial_ops_count, f"retries remaining = {retries}"
-
-    def test_audit_order_by(self):
-        entries_forward = call('audit.query', {'services': ['SMB'], 'query-options': {
-            'order_by': ['audit_id']
-        }})
-
-        entries_reverse = call('audit.query', {'services': ['SMB'], 'query-options': {
-            'order_by': ['-audit_id']
-        }})
-
-        head_forward_id = entries_forward[0]['audit_id']
-        tail_forward_id = entries_forward[-1]['audit_id']
-
-        head_reverse_id = entries_reverse[0]['audit_id']
-        tail_reverse_id = entries_reverse[-1]['audit_id']
-
-        assert head_forward_id == tail_reverse_id
-        assert tail_forward_id == head_reverse_id
-
-    def test_audit_export(self):
-        for backend in ['CSV', 'JSON', 'YAML']:
-            report_path = call('audit.export', {'export_format': backend}, job=True)
-            assert report_path.startswith('/audit/reports/root/')
-            st = call('filesystem.stat', report_path)
-            assert st['size'] != 0, str(st)
-
-            content_len = check_audit_download(report_path, backend)
-            assert content_len == st['size']
-
-    def test_audit_export_nonroot(self):
-        with unprivileged_user_client(roles=['SYSTEM_AUDIT_READ', 'FILESYSTEM_ATTRS_READ']) as c:
-            me = c.call('auth.me')
-            username = me['pw_name']
-
-            for backend in ['CSV', 'JSON', 'YAML']:
-                report_path = c.call('audit.export', {'export_format': backend}, job=True)
-                assert report_path.startswith(f'/audit/reports/{username}/')
-                st = c.call('filesystem.stat', report_path)
-                assert st['size'] != 0, str(st)
-
-                # Make the call as the client
-                job_id, path = c.call(
-                    "core.download", "audit.download_report",
-                    [{"report_name": os.path.basename(report_path)}],
-                    f"report.{backend.lower()}"
-                )
-                r = requests.get(f"{url()}{path}")
-                r.raise_for_status()
-                assert len(r.content) == st['size']
-
-    @pytest.mark.parametrize('svc', ["MIDDLEWARE", "SMB"])
-    def test_audit_timestamps(self, svc):
-        """
-        NAS-130373
-        Confirm the timestamps are processed as expected
-        """
-        audit_entry = get_audit_entry(svc)
-
-        ae_ts_ts = int(audit_entry['timestamp'].timestamp())
-        ae_msg_ts = int(audit_entry['message_timestamp'])
-        assert abs(ae_ts_ts - ae_msg_ts) < 2, f"$date='{ae_ts_ts}', message_timestamp={ae_msg_ts}"
-
-
-@pytest.mark.skipif(not ha, reason="Skip HA tests")
-class TestAuditOpsHA:
-    @pytest.mark.parametrize('remote_available', [True, False])
-    def test_audit_ha_query(self, standby_audit_event, remote_available):
-        '''
-        Confirm:
-            1) Ability to get a remote node audit event from a healthy remote node
-            2) An exception is raised when requesting remote node audit events while the remote node is unavailable.
-        NOTE: The standby_audit_event fixture generates the remote node audit event.
-        '''
-        event = standby_audit_event['event']
-        username = standby_audit_event['username']
-        payload = {
-            "query-filters": [["event_data.method", "=", event], ["success", "=", False]],
-            "query-options": {"select": ["event_data", "success"]},
-            "remote_controller": True
-        }
-        if not remote_available:
-            job_id = call('failover.reboot.other_node')
-            # Let the reboot get churning
-            sleep(2)
-            with pytest.raises(ValidationError) as e:
-                call('audit.query', payload)
-            assert "failed to communicate" in str(e.value)
-
-            # Wait for the remote to return
-            assert call("core.job_wait", job_id, job=True)
-        else:
-            # Handle delays in the audit database
-            remote_audit_entry = []
-            tries = 3
-            while tries > 0 and remote_audit_entry == []:
-                sleep(1)
-                remote_audit_entry = call('audit.query', payload)
-                if remote_audit_entry != []:
-                    break
-                tries -= 1
-
-            assert tries > 0, "Failed to get expected audit entry"
-            assert remote_audit_entry != []
-            description = remote_audit_entry[0]['event_data']['description']
-            assert username in description, remote_audit_entry[0]['event_data']
-
-    def test_audit_ha_export(self, standby_audit_event):
-        """
-        Confirm we can download 'Active' and 'Standby' audit DB.
-        With a failed user delete on the 'Standby' controller, download the
-        audit DB from both controllers and confirm the failure appears
-        in the 'Standby' audit DB and not in the 'Active' audit DB.
-        """
-        assert standby_audit_event
-        username = standby_audit_event['username']
-        report_path_active = call('audit.export', {'export_format': 'CSV'}, job=True)
-        report_path_standby = call('audit.export', {'export_format': 'CSV', 'remote_controller': True}, job=True)
-
-        # Confirm entry NOT in active controller audit DB
-        with pytest.raises(AssertionError):
-            check_audit_download(report_path_active, 'CSV', f"Delete user {username}")
-
-        # Confirm entry IS in standby controller audit DB
-        check_audit_download(report_path_standby, 'CSV', f"Delete user {username}")
diff --git a/tests/api2/test_audit_dataset.py b/tests/api2/test_audit_dataset.py
deleted file mode 100644
index 57f32e6d368a1..0000000000000
--- a/tests/api2/test_audit_dataset.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from middlewared.test.integration.utils import call, pool
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-DS_NAME = f'{pool}/audit_dataset_insert_name_here'
-
-
-def test_dataset_audit():
-    payload = {'name': DS_NAME}
-
-    try:
-        with expect_audit_method_calls([{
-            'method': 'pool.dataset.create',
-            'params': [payload],
-            'description': f'Pool dataset create {DS_NAME}',
-        }]):
-            call('pool.dataset.create', payload)
-
-        with expect_audit_method_calls([{
-            'method': 'pool.dataset.update',
-            'params': [DS_NAME, {'atime': 'OFF'}],
-            'description': f'Pool dataset update {DS_NAME}',
-        }]):
-            call('pool.dataset.update', DS_NAME, {'atime': 'OFF'})
-
-    finally:
-        with expect_audit_method_calls([{
-            'method': 'pool.dataset.delete',
-            'params': [DS_NAME],
-            'description': f'Pool dataset delete {DS_NAME}',
-        }]):
-            call('pool.dataset.delete', DS_NAME)
diff --git a/tests/api2/test_audit_ftp.py b/tests/api2/test_audit_ftp.py
deleted file mode 100644
index f7705a45517cb..0000000000000
--- a/tests/api2/test_audit_ftp.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-
-def test_ftp_config_audit():
-    '''
-    Test the auditing of FTP configuration changes
-    '''
-    initial_ftp_config = call('ftp.config')
-    try:
-        # UPDATE
-        payload = {
-            'clients': 1000,
-            'banner': "Hello, from New York"
-        }
-        with expect_audit_method_calls([{
-            'method': 'ftp.update',
-            'params': [payload],
-            'description': 'Update FTP configuration',
-        }]):
-            call('ftp.update', payload)
-    finally:
-        # Restore initial state
-        restore_payload = {
-            'clients': initial_ftp_config['clients'],
-            'banner': initial_ftp_config['banner']
-        }
-        call('ftp.update', restore_payload)
diff --git a/tests/api2/test_audit_iscsi.py b/tests/api2/test_audit_iscsi.py
deleted file mode 100644
index 881abe81e4886..0000000000000
--- a/tests/api2/test_audit_iscsi.py
+++ /dev/null
@@ -1,408 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.iscsi import iscsi_extent, iscsi_target
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-REDACTED_SECRET = '********'
-MB = 1024 * 1024
-MB_100 = 100 * MB
-DEFAULT_ISCSI_PORT = 3260
-
-
-@pytest.fixture(scope='module')
-def initialize_zvol_for_iscsi_audit_tests(request):
-    with dataset('audit-test-iscsi') as ds:
-        zvol = f'{ds}/zvol'
-        payload = {
-            'name': zvol,
-            'type': 'VOLUME',
-            'volsize': MB_100,
-            'volblocksize': '16K'
-        }
-        zvol_config = call('pool.dataset.create', payload)
-        try:
-            yield zvol
-        finally:
-            call('pool.dataset.delete', zvol_config['id'])
-
-
-def test_iscsi_auth_audit():
-    auth_config = None
-    tag = 1
-    user1 = 'someuser1'
-    user2 = 'someuser2'
-    password1 = 'somepassword123'
-    password2 = 'newpassword1234'
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.auth.create',
-            'params': [
-                {
-                    'tag': tag,
-                    'user': user1,
-                    'secret': REDACTED_SECRET,
-                }
-            ],
-            'description': f'Create iSCSI Authorized Access {user1} ({tag})',
-        }]):
-            payload = {
-                'tag': tag,
-                'user': user1,
-                'secret': password1,
-            }
-            auth_config = call('iscsi.auth.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.auth.update',
-            'params': [
-                auth_config['id'],
-                {
-                    'user': user2,
-                    'secret': REDACTED_SECRET,
-                }],
-            'description': f'Update iSCSI Authorized Access {user1} ({tag})',
-        }]):
-            payload = {
-                'user': user2,
-                'secret': password2,
-            }
-            auth_config = call('iscsi.auth.update', auth_config['id'], payload)
-    finally:
-        if auth_config is not None:
-            # DELETE
-            id_ = auth_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.auth.delete',
-                'params': [id_],
-                'description': f'Delete iSCSI Authorized Access {user2} ({tag})',
-            }]):
-                call('iscsi.auth.delete', id_)
-
-
-def test_iscsi_extent_audit(initialize_zvol_for_iscsi_audit_tests):
-    extent_name1 = 'extent1'
-    extent_name2 = 'extent2'
-    disk = f'zvol/{initialize_zvol_for_iscsi_audit_tests}'
-    extent_config = None
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.extent.create',
-            'params': [
-                {
-                    'type': 'DISK',
-                    'disk': disk,
-                    'name': extent_name1,
-                }
-            ],
-            'description': f'Create iSCSI extent {extent_name1}',
-        }]):
-            payload = {
-                'type': 'DISK',
-                'disk': disk,
-                'name': extent_name1,
-            }
-            extent_config = call('iscsi.extent.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.extent.update',
-            'params': [
-                extent_config['id'],
-                {
-                    'name': extent_name2,
-                }],
-            'description': f'Update iSCSI extent {extent_name1}',
-        }]):
-            payload = {
-                'name': extent_name2,
-            }
-            extent_config = call('iscsi.extent.update', extent_config['id'], payload)
-    finally:
-        if extent_config is not None:
-            # DELETE
-            id_ = extent_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.extent.delete',
-                'params': [id_],
-                'description': f'Delete iSCSI extent {extent_name2}',
-            }]):
-                call('iscsi.extent.delete', id_)
-
-
-def test_iscsi_global_audit():
-    global_config = None
-    try:
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.global.update',
-            'params': [
-                {
-                    'alua': True,
-                    'listen_port': 13260,
-                }
-            ],
-            'description': 'Update iSCSI',
-        }]):
-            payload = {
-                'alua': True,
-                'listen_port': 13260,
-            }
-            global_config = call('iscsi.global.update', payload)
-    finally:
-        if global_config is not None:
-            payload = {
-                'alua': False,
-                'listen_port': DEFAULT_ISCSI_PORT,
-            }
-            global_config = call('iscsi.global.update', payload)
-
-
-def test_iscsi_host_audit():
-    host_config = None
-    ip = '1.2.3.4'
-    iqn = 'iqn.1993-08.org.debian:01:1234567890'
-    description = 'Development VM (debian)'
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.host.create',
-            'params': [
-                {
-                    'ip': ip,
-                    'iqns': [iqn],
-                }
-            ],
-            'description': f'Create iSCSI host {ip}',
-        }]):
-            payload = {
-                'ip': ip,
-                'iqns': [iqn],
-            }
-            host_config = call('iscsi.host.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.host.update',
-            'params': [
-                host_config['id'],
-                {
-                    'description': description,
-                }],
-            'description': f'Update iSCSI host {ip}',
-        }]):
-            payload = {
-                'description': description,
-            }
-            host_config = call('iscsi.host.update', host_config['id'], payload)
-    finally:
-        if host_config is not None:
-            # DELETE
-            id_ = host_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.host.delete',
-                'params': [id_],
-                'description': f'Delete iSCSI host {ip}',
-            }]):
-                call('iscsi.host.delete', id_)
-
-
-def test_iscsi_initiator_audit():
-    initiator_config = None
-    comment = 'Default initiator'
-    comment2 = 'INITIATOR'
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.initiator.create',
-            'params': [
-                {
-                    'comment': comment,
-                    'initiators': [],
-                }
-            ],
-            'description': f'Create iSCSI initiator {comment}',
-        }]):
-            payload = {
-                'comment': comment,
-                'initiators': [],
-            }
-            initiator_config = call('iscsi.initiator.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.initiator.update',
-            'params': [
-                initiator_config['id'],
-                {
-                    'comment': comment2,
-                    'initiators': ['1.2.3.4', '5.6.7.8'],
-                }],
-            'description': f'Update iSCSI initiator {comment}',
-        }]):
-            payload = {
-                'comment': comment2,
-                'initiators': ['1.2.3.4', '5.6.7.8'],
-            }
-            initiator_config = call('iscsi.initiator.update', initiator_config['id'], payload)
-    finally:
-        if initiator_config is not None:
-            # DELETE
-            id_ = initiator_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.initiator.delete',
-                'params': [id_],
-                'description': f'Delete iSCSI initiator {comment2}',
-            }]):
-                call('iscsi.initiator.delete', id_)
-
-
-def test_iscsi_portal_audit():
-    portal_config = None
-    comment = 'Default portal'
-    comment2 = 'PORTAL'
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.portal.create',
-            'params': [
-                {
-                    'listen': [{'ip': '0.0.0.0'}],
-                    'comment': comment,
-                    'discovery_authmethod': 'NONE',
-                }
-            ],
-            'description': f'Create iSCSI portal {comment}',
-        }]):
-            payload = {
-                'listen': [{'ip': '0.0.0.0'}],
-                'comment': comment,
-                'discovery_authmethod': 'NONE',
-            }
-            portal_config = call('iscsi.portal.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.portal.update',
-            'params': [
-                portal_config['id'],
-                {
-                    'comment': comment2,
-                }],
-            'description': f'Update iSCSI portal {comment}',
-        }]):
-            payload = {
-                'comment': comment2,
-            }
-            portal_config = call('iscsi.portal.update', portal_config['id'], payload)
-    finally:
-        if portal_config is not None:
-            # DELETE
-            id_ = portal_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.portal.delete',
-                'params': [id_],
-                'description': f'Delete iSCSI portal {comment2}',
-            }]):
-                call('iscsi.portal.delete', id_)
-
-
-def test_iscsi_target_audit():
-    target_config = None
-    target_name = 'target1'
-    target_alias1 = 'target1 alias'
-    target_alias2 = 'Updated target1 alias'
-    try:
-        # CREATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.target.create',
-            'params': [
-                {
-                    'name': target_name,
-                    'alias': target_alias1,
-                }
-            ],
-            'description': f'Create iSCSI target {target_name}',
-        }]):
-            payload = {
-                'name': target_name,
-                'alias': target_alias1,
-            }
-            target_config = call('iscsi.target.create', payload)
-        # UPDATE
-        with expect_audit_method_calls([{
-            'method': 'iscsi.target.update',
-            'params': [
-                target_config['id'],
-                {
-                    'alias': target_alias2,
-                }],
-            'description': f'Update iSCSI target {target_name}',
-        }]):
-            payload = {
-                'alias': target_alias2,
-            }
-            target_config = call('iscsi.target.update', target_config['id'], payload)
-    finally:
-        if target_config is not None:
-            # DELETE
-            id_ = target_config['id']
-            with expect_audit_method_calls([{
-                'method': 'iscsi.target.delete',
-                'params': [id_, True],
-                'description': f'Delete iSCSI target {target_name}',
-            }]):
-                call('iscsi.target.delete', id_, True)
-
-
-def test_iscsi_targetextent_audit(initialize_zvol_for_iscsi_audit_tests):
-
-    payload = {
-        'type': 'DISK',
-        'disk': f'zvol/{initialize_zvol_for_iscsi_audit_tests}',
-        'name': 'extent1',
-    }
-    with iscsi_extent(payload) as extent_config:
-        with iscsi_target({'name': 'target1', 'alias': 'Audit test'}) as target_config:
-            targetextent_config = None
-            try:
-                # CREATE
-                with expect_audit_method_calls([{
-                    'method': 'iscsi.targetextent.create',
-                    'params': [
-                        {
-                            'target': target_config['id'],
-                            'extent': extent_config['id'],
-                            'lunid': 0,
-                        }
-                    ],
-                    'description': 'Create iSCSI target/LUN/extent mapping target1/0/extent1',
-                }]):
-                    payload = {
-                        'target': target_config['id'],
-                        'extent': extent_config['id'],
-                        'lunid': 0,
-                    }
-                    targetextent_config = call('iscsi.targetextent.create', payload)
-                # UPDATE
-                with expect_audit_method_calls([{
-                    'method': 'iscsi.targetextent.update',
-                    'params': [
-                        targetextent_config['id'],
-                        {
-                            'lunid': 1,
-                        }],
-                    'description': 'Update iSCSI target/LUN/extent mapping target1/0/extent1',
-                }]):
-                    payload = {
-                        'lunid': 1,
-                    }
-                    targetextent_config = call('iscsi.targetextent.update', targetextent_config['id'], payload)
-            finally:
-                if targetextent_config is not None:
-                    # DELETE
-                    id_ = targetextent_config['id']
-                    with expect_audit_method_calls([{
-                        'method': 'iscsi.targetextent.delete',
-                        'params': [id_, True],
-                        'description': 'Delete iSCSI target/LUN/extent mapping target1/1/extent1',
-                    }]):
-                        call('iscsi.targetextent.delete', id_, True)
diff --git a/tests/api2/test_audit_nfs.py b/tests/api2/test_audit_nfs.py
deleted file mode 100644
index 7ccdcce08b788..0000000000000
--- a/tests/api2/test_audit_nfs.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import pytest
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-
-@pytest.fixture(scope='module')
-def nfs_audit_dataset(request):
-    with dataset('audit-test-nfs') as ds:
-        try:
-            yield ds
-        finally:
-            pass
-
-
-def test_nfs_config_audit():
-    '''
-    Test the auditing of NFS configuration changes
-    '''
-    bogus_user = 'bogus_user'
-    bogus_password = 'boguspassword123'
-    initial_nfs_config = call('nfs.config')
-    try:
-        # UPDATE
-        payload = {
-            'mountd_log': not initial_nfs_config['mountd_log'],
-            'mountd_port': 618,
-            'protocols': ["NFSV4"]
-        }
-        with expect_audit_method_calls([{
-            'method': 'nfs.update',
-            'params': [payload],
-            'description': 'Update NFS configuration',
-        }]):
-            call('nfs.update', payload)
-    finally:
-        # Restore initial state
-        restore_payload = {
-            'mountd_log': initial_nfs_config['mountd_log'],
-            'mountd_port': initial_nfs_config['mountd_port'],
-            'protocols': initial_nfs_config['protocols']
-        }
-        call('nfs.update', restore_payload)
-
-
-def test_nfs_share_audit(nfs_audit_dataset):
-    '''
-    Test the auditing of NFS share operations
-    '''
-    nfs_export_path = f"/mnt/{nfs_audit_dataset}"
-    share_config = None
-    try:
-        # CREATE
-        payload = {
-            "comment": "My Test Share",
-            "path": nfs_export_path,
-            "security": ["SYS"]
-        }
-        with expect_audit_method_calls([{
-            'method': 'sharing.nfs.create',
-            'params': [payload],
-            'description': f'NFS share create {nfs_export_path}',
-        }]):
-            share_config = call('sharing.nfs.create', payload)
-        # UPDATE
-        payload = {
-            "security": []
-        }
-        with expect_audit_method_calls([{
-            'method': 'sharing.nfs.update',
-            'params': [
-                share_config['id'],
-                payload,
-            ],
-            'description': f'NFS share update {nfs_export_path}',
-        }]):
-            share_config = call('sharing.nfs.update', share_config['id'], payload)
-    finally:
-        if share_config is not None:
-            # DELETE
-            id_ = share_config['id']
-            with expect_audit_method_calls([{
-                'method': 'sharing.nfs.delete',
-                'params': [id_],
-                'description': f'NFS share delete {nfs_export_path}',
-            }]):
-                call('sharing.nfs.delete', id_)
diff --git a/tests/api2/test_audit_permission.py b/tests/api2/test_audit_permission.py
deleted file mode 100644
index b97c2079e190b..0000000000000
--- a/tests/api2/test_audit_permission.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-JENNY = 8675309
-
-
-def test_audit_chown():
-    with dataset('audit_chown') as ds:
-        path = os.path.join('/mnt', ds)
-        payload = {'path': path, 'uid': JENNY}
-
-        with expect_audit_method_calls([{
-            'method': 'filesystem.chown',
-            'params': [payload],
-            'description': f'Filesystem change owner {path}'
-        }]):
-            call('filesystem.chown', payload, job=True)
-
-
-def test_audit_setperm():
-    with dataset('audit_setperm') as ds:
-        path = os.path.join('/mnt', ds)
-        payload = {'path': path, 'mode': '777'}
-
-        with expect_audit_method_calls([{
-            'method': 'filesystem.setperm',
-            'params': [payload],
-            'description': f'Filesystem set permission {path}'
-        }]):
-            call('filesystem.setperm', payload, job=True)
-
-
-def test_audit_setacl():
-    with dataset('audit_setacl', {'share_type': 'SMB'}) as ds:
-        path = os.path.join('/mnt', ds)
-        the_acl = call('filesystem.getacl', os.path.join('/mnt', ds))['acl']
-        the_acl.append({
-            'tag': 'USER',
-            'id': JENNY,
-            'perms': {'BASIC': 'FULL_CONTROL'},
-            'flags': {'BASIC': 'INHERIT'},
-            'type': 'ALLOW'
-        })
-
-        payload = {'path': path, 'dacl': the_acl}
-
-        with expect_audit_method_calls([{
-            'method': 'filesystem.setacl',
-            'params': [payload],
-            'description': f'Filesystem set ACL {path}'
-        }]):
-            call('filesystem.setacl', payload, job=True)
diff --git a/tests/api2/test_audit_pool.py b/tests/api2/test_audit_pool.py
deleted file mode 100644
index 39126d85daafc..0000000000000
--- a/tests/api2/test_audit_pool.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_log
-
-
-def test_pool_update_audit_success():
-    with another_pool() as pool:
-        params = [pool['id'], {'autotrim': 'ON'}]
-        with expect_audit_log([{
-            'event_data': {
-                'authenticated': True,
-                'authorized': True,
-                'method': 'pool.update',
-                'params': params,
-                'description': 'Pool update test',
-            },
-            'success': True,
-        }]):
-            call('pool.update', *params, job=True)
-
-
-def test_pool_update_audit_error():
-    with another_pool() as pool:
-        params = [pool['id'], {'topology': {'spares': ['nonexistent']}}]
-
-        with expect_audit_log([{
-            'event_data': {
-                'authenticated': True,
-                'authorized': True,
-                'method': 'pool.update',
-                'params': params,
-                'description': 'Pool update test',
-            },
-            'success': False,
-        }]):
-            with pytest.raises(Exception):
-                call('pool.update', *params, job=True)
diff --git a/tests/api2/test_audit_rest.py b/tests/api2/test_audit_rest.py
deleted file mode 100644
index 1a33d9dfeb7f5..0000000000000
--- a/tests/api2/test_audit_rest.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# -*- coding=utf-8 -*-
-import io
-import json
-import os
-import sys
-from unittest.mock import ANY
-
-import requests
-
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, url
-from middlewared.test.integration.utils.audit import expect_audit_log
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import POST
-
-
-def test_unauthenticated_call():
-    with expect_audit_log([
-        {
-            "event": "AUTHENTICATION",
-            "event_data": {
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "invalid"},
-                },
-                "error": "Bad username or password",
-            },
-            "success": False,
-        }
-    ], include_logins=True):
-        r = requests.get(f"{url()}/api/v2.0/system/info", auth=("invalid", "password"))
-        assert r.status_code == 401
-
-
-def test_unauthenticated_upload_call():
-    with expect_audit_log([
-        {
-            "event": "AUTHENTICATION",
-            "event_data": {
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "invalid"},
-                },
-                "error": "Bad username or password",
-            },
-            "success": False,
-        }
-    ], include_logins=True):
-        r = requests.post(
-            f"{url()}/api/v2.0/resttest/test_input_pipe",
-            auth=("invalid", "password"),
-            files={
-                "data": (None, io.StringIO('{"key": "value"}')),
-                "file": (None, io.StringIO("FILE")),
-            },
-        )
-        assert r.status_code == 401
-
-
-def test_authenticated_call():
-    user_id = None
-    try:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "REST",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root", "login_at": ANY},
-                    },
-                },
-                "event": "AUTHENTICATION",
-                "event_data": {
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root"},
-                    },
-                    "error": None,
-                },
-                "success": True,
-            },
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "REST",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root", "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": True,
-                    "method": "user.create",
-                    "params": [
-                        {
-                            "username": "sergey",
-                            "full_name": "Sergey",
-                            "group_create": True,
-                            "home": "/nonexistent",
-                            "password": "********",
-                        }
-                    ],
-                    "description": "Create user sergey",
-                },
-                "success": True,
-            },
-        ], include_logins=True):
-            r = POST("/user", {
-                "username": "sergey",
-                "full_name": "Sergey",
-                "group_create": True,
-                "home": "/nonexistent",
-                "password": "password",
-            })
-            assert r.status_code == 200
-            user_id = r.json()
-    finally:
-        if user_id is not None:
-            call("user.delete", user_id)
-
-
-def test_unauthorized_call():
-    with unprivileged_user(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=[],
-        roles=[],
-        web_shell=False,
-    ) as u:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "REST",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": ANY, "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": False,
-                    "method": "user.create",
-                    "params": [{"username": "sergey", "full_name": "Sergey"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            r = requests.post(
-                f"{url()}/api/v2.0/user",
-                auth=(u.username, u.password),
-                headers={"Content-type": "application/json"},
-                data=json.dumps({"username": "sergey", "full_name": "Sergey"}),
-            )
-            assert r.status_code == 403, r.text
-
-
-def test_bogus_call():
-    with expect_audit_log([
-        {
-            "service_data": {
-                "vers": {
-                    "major": 0,
-                    "minor": 1,
-                },
-                "origin": ANY,
-                "protocol": "REST",
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "root", "login_at": ANY},
-                },
-            },
-            "event": "METHOD_CALL",
-            "event_data": {
-                "authenticated": True,
-                "authorized": True,
-                "method": "user.create",
-                "params": [{}],
-                "description": "Create user",
-            },
-            "success": False,
-        }
-    ]):
-        response = POST("/user", {})
-        assert response.status_code == 422
diff --git a/tests/api2/test_audit_smb.py b/tests/api2/test_audit_smb.py
deleted file mode 100644
index e12ba132a6953..0000000000000
--- a/tests/api2/test_audit_smb.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import os
-import sys
-
-import pytest
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-sys.path.append(os.getcwd())
-
-REDACTED_SECRET = '********'
-
-
-@pytest.fixture(scope='module')
-def smb_audit_dataset(request):
-    with dataset('audit-test-smb') as ds:
-        try:
-            yield ds
-        finally:
-            pass
-
-
-def test_smb_update_audit():
-    '''
-    Test the auditing of SMB configuration changes
-    '''
-    initial_smb_config = call('smb.config')
-    payload = {'enable_smb1': True}
-    try:
-        with expect_audit_method_calls([{
-            'method': 'smb.update',
-            'params': [payload],
-            'description': 'Update SMB configuration',
-        }]):
-            call('smb.update', payload)
-    finally:
-        call('smb.update', {'enable_smb1': False})
-
-
-def test_smb_share_audit(smb_audit_dataset):
-    '''
-    Test the auditing of SMB share operations
-    '''
-    smb_share_path = os.path.join('/mnt', smb_audit_dataset)
-    share_config = None
-    try:
-        # CREATE
-        payload = {
-            "comment": "My Test Share",
-            "path": smb_share_path,
-            "name": "audit_share"
-        }
-        with expect_audit_method_calls([{
-            'method': 'sharing.smb.create',
-            'params': [payload],
-            'description': 'SMB share create audit_share',
-        }]):
-            share_config = call('sharing.smb.create', payload)
-
-        # UPDATE
-        payload = {
-            "ro": True 
-        }
-        with expect_audit_method_calls([{
-            'method': 'sharing.smb.update',
-            'params': [
-                share_config['id'],
-                payload,
-            ],
-            'description': 'SMB share update audit_share',
-        }]):
-            share_config = call('sharing.smb.update', share_config['id'], payload)
-
-    finally:
-        if share_config is not None:
-            # DELETE
-            share_id = share_config['id']
-            with expect_audit_method_calls([{
-                'method': 'sharing.smb.delete',
-                'params': [share_id],
-                'description': 'SMB share delete audit_share',
-            }]):
-                call('sharing.smb.delete', share_id)
diff --git a/tests/api2/test_audit_sudo.py b/tests/api2/test_audit_sudo.py
deleted file mode 100644
index 09477fe0c8122..0000000000000
--- a/tests/api2/test_audit_sudo.py
+++ /dev/null
@@ -1,285 +0,0 @@
-import contextlib
-import secrets
-import string
-import time
-
-import pytest
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.time_utils import utc_now
-from datetime import timezone
-
-EVENT_KEYS = {'timestamp', 'message_timestamp', 'service_data', 'username', 'service', 'audit_id', 'address', 'event_data', 'event', 'session', 'success'}
-ACCEPT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'}
-REJECT_KEYS = {'command', 'submituser', 'lines', 'submithost', 'uuid', 'reason', 'runenv', 'server_time', 'runcwd', 'submitcwd', 'runuid', 'runargv', 'columns', 'runuser', 'submit_time'}
-
-LS_COMMAND = '/bin/ls'
-ECHO_COMMAND = '/bin/echo'
-
-SUDO_TO_USER = 'sudo-to-user'
-SUDO_TO_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-
-def get_utc():
-    utc_time = int(utc_now().replace(tzinfo=timezone.utc).timestamp())
-    return utc_time
-
-
-def user_sudo_events(username, count=False):
-    payload = {
-        'services': ['SUDO'],
-        'query-filters': [['username', '=', username]],
-    }
-    if count:
-        payload['query-options'] = {'count': True}
-    return call('audit.query', payload)
-
-
-def wait_for_events(username, newcount, retries=20, delay=0.5):
-    """ Poll until the SUDO event count for username reaches newcount.
-    Return the last observed count so callers can assert on it. """
-    assert 0 < retries <= 20
-    assert 0.1 <= delay <= 1
-    observed = user_sudo_events(username, True)
-    while newcount != observed and retries:
-        time.sleep(delay)
-        observed = user_sudo_events(username, True)
-        retries -= 1
-    return observed
-
-
-def assert_accept(event):
-    assert type(event) is dict
-    assert set(event.keys()) == EVENT_KEYS
-    assert set(event['event_data'].keys()) == {'sudo'}
-    assert set(event['event_data']['sudo'].keys()) == {'accept'}
-    assert set(event['event_data']['sudo']['accept'].keys()) == ACCEPT_KEYS
-    return event['event_data']['sudo']['accept']
-
-
-def assert_reject(event):
-    assert type(event) is dict
-    assert set(event.keys()) == EVENT_KEYS
-    assert set(event['event_data'].keys()) == {'sudo'}
-    assert set(event['event_data']['sudo'].keys()) == {'reject'}
-    assert set(event['event_data']['sudo']['reject'].keys()) == REJECT_KEYS
-    return event['event_data']['sudo']['reject']
-
-
-def assert_timestamp(event, event_data):
-    """
-    NAS-130373:  message_timestamp should be UTC
-    """
-    assert type(event) is dict
-    submit_time = event_data['submit_time']['seconds']
-    msg_ts = event['message_timestamp']
-    utc_ts = get_utc()
-
-    # Confirm consistency and correctness of timestamps.
-    # The message_timestamp and the submit_time should be UTC and
-    # are expected to be mostly the same value. We allow for a generous delta between
-    # current UTC and the audit message timestamps.
-    assert abs(utc_ts - msg_ts) < 5, f"utc_ts={utc_ts}, msg_ts={msg_ts}"
-    assert abs(utc_ts - int(submit_time)) < 5, f"utc_ts={utc_ts}, submit_time={submit_time}"
-    assert abs(msg_ts - int(submit_time)) < 5, f"msg_ts={msg_ts}, submit_time={submit_time}"
-
-
-@contextlib.contextmanager
-def initialize_for_sudo_tests(username, password, data):
-    data.update({
-        'username': username,
-        'full_name': username,
-        'group_create': True,
-        'password': password,
-        'shell': '/usr/bin/bash',
-        'ssh_password_enabled': True,
-    })
-    with user(data) as newuser:
-        yield newuser
-
-
-@pytest.fixture(scope='module')
-def sudo_to_user():
-    with initialize_for_sudo_tests(SUDO_TO_USER, SUDO_TO_PASSWORD, {}) as u:
-        yield u
-
-
-class SudoTests:
-
-    def generate_command(self, cmd, runuser=None, password=None):
-        command = ['sudo']
-        if password:
-            command.append('-S')
-        if runuser:
-            command.extend(['-u', runuser])
-        command.append(cmd)
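-        # Illustrative example: generate_command('ls /etc', runuser='backup',
-        # password='pw') builds ['sudo', '-S', '-u', 'backup', 'ls /etc'] and
-        # the join below returns 'sudo -S -u backup ls /etc'.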
-        return " ".join(command)
-
-    def allowed_all(self):
-        """All of the sudo commands are allowed"""
-        # First get a baseline # of events
-        count = user_sudo_events(self.USER, True)
-
-        # Now create an event and do some basic checking
-        self.sudo_command('ls /etc')
-        assert count + 1 == wait_for_events(self.USER, count + 1)
-        event = user_sudo_events(self.USER)[-1]
-        accept = assert_accept(event)
-        assert accept['submituser'] == self.USER
-        assert accept['command'] == LS_COMMAND
-        assert accept['runuser'] == 'root'
-        assert accept['runargv'].split(',') == ['ls', '/etc']
-        # NAS-130373
-        assert_timestamp(event, accept)
-
-        # One more completely unique command
-        magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20))
-        self.sudo_command(f'echo {magic}')
-        assert count + 2 == wait_for_events(self.USER, count + 2)
-        accept = assert_accept(user_sudo_events(self.USER)[-1])
-        assert accept['submituser'] == self.USER
-        assert accept['command'] == ECHO_COMMAND
-        assert accept['runuser'] == 'root'
-        assert accept['runargv'].split(',') == ['echo', magic]
-
-        # sudo to a non-root user
-        self.sudo_command('ls /tmp', SUDO_TO_USER)
-        assert count + 3 == wait_for_events(self.USER, count + 3)
-        accept = assert_accept(user_sudo_events(self.USER)[-1])
-        assert accept['submituser'] == self.USER
-        assert accept['command'] == LS_COMMAND
-        assert accept['runuser'] == SUDO_TO_USER
-        assert accept['runargv'].split(',') == ['ls', '/tmp']
-
-    def allowed_some(self):
-        """Some of the sudo commands are allowed"""
-        # First get a baseline # of events
-        count = user_sudo_events(self.USER, True)
-
-        # Generate a sudo command that we ARE allowed to perform
-        magic = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(20))
-        self.sudo_command(f'echo {magic}')
-        assert count + 1 == wait_for_events(self.USER, count + 1)
-        accept = assert_accept(user_sudo_events(self.USER)[-1])
-        assert accept['submituser'] == self.USER
-        assert accept['command'] == ECHO_COMMAND
-        assert accept['runuser'] == 'root'
-        assert accept['runargv'].split(',') == ['echo', magic]
-
-        # Generate a sudo command that we are NOT allowed to perform
-        with pytest.raises(AssertionError):
-            self.sudo_command('ls /etc')
-        # Returned exception depends upon whether passwd or nopasswd
-        assert count + 2 == wait_for_events(self.USER, count + 2)
-        reject = assert_reject(user_sudo_events(self.USER)[-1])
-        assert reject['submituser'] == self.USER
-        assert reject['command'] == LS_COMMAND
-        assert reject['runuser'] == 'root'
-        assert reject['runargv'].split(',') == ['ls', '/etc']
-        assert reject['reason'] == 'command not allowed'
-
-    def allowed_none(self):
-        """None of the sudo commands are allowed"""
-        # First get a baseline # of events
-        count = user_sudo_events(self.USER, True)
-
-        # Now create an event and do some basic checking to ensure it failed
-        with pytest.raises(AssertionError) as ve:
-            self.sudo_command('ls /etc')
-        assert 'is not allowed to execute ' in str(ve), str(ve)
-        assert count + 1 == wait_for_events(self.USER, count + 1)
-        event = user_sudo_events(self.USER)[-1]
-        reject = assert_reject(event)
-        assert reject['submituser'] == self.USER
-        assert reject['command'] == LS_COMMAND
-        assert reject['runuser'] == 'root'
-        assert reject['runargv'].split(',') == ['ls', '/etc']
-        assert reject['reason'] == 'command not allowed'
-        # NAS-130373
-        assert_timestamp(event, reject)
-
-
-class SudoNoPasswd:
-    def sudo_command(self, cmd, runuser=None):
-        command = self.generate_command(cmd, runuser)
-        ssh(command, user=self.USER, password=self.PASSWORD)
-
-
-class SudoPasswd:
-    def sudo_command(self, cmd, runuser=None):
-        command = f'echo {self.PASSWORD} | {self.generate_command(cmd, runuser, self.PASSWORD)}'
-        ssh(command, user=self.USER, password=self.PASSWORD)
-
-
-class TestSudoAllowedAllNoPasswd(SudoTests, SudoNoPasswd):
-
-    USER = 'sudo-allowed-all-nopw-user'
-    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-    @pytest.fixture(scope='class')
-    def create_user(self):
-        with initialize_for_sudo_tests(self.USER,
-                                       self.PASSWORD,
-                                       {'sudo_commands_nopasswd': ['ALL']}) as u:
-            yield u
-
-    def test_audit_query(self, sudo_to_user, create_user):
-        self.allowed_all()
-
-
-class TestSudoAllowedAllPasswd(SudoTests, SudoPasswd):
-
-    USER = 'sudo-allowed-all-pw-user'
-    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-    @pytest.fixture(scope='class')
-    def create_user(self):
-        with initialize_for_sudo_tests(self.USER,
-                                       self.PASSWORD,
-                                       {'sudo_commands': ['ALL']}) as u:
-            yield u
-
-    def test_audit_query(self, sudo_to_user, create_user):
-        self.allowed_all()
-
-
-class TestSudoAllowedNonePasswd(SudoTests, SudoPasswd):
-
-    USER = 'sudo-allowed-none-pw-user'
-    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-    @pytest.fixture(scope='class')
-    def create_user(self):
-        with initialize_for_sudo_tests(self.USER, self.PASSWORD, {}) as u:
-            yield u
-
-    def test_audit_query(self, create_user):
-        self.allowed_none()
-
-
-class TestSudoAllowedSomeNoPasswd(SudoTests, SudoNoPasswd):
-
-    USER = 'sudo-allowed-some-nopw-user'
-    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-    @pytest.fixture(scope='class')
-    def create_user(self):
-        with initialize_for_sudo_tests(self.USER,
-                                       self.PASSWORD,
-                                       {'sudo_commands_nopasswd': [ECHO_COMMAND]}) as u:
-            yield u
-
-    def test_audit_query(self, create_user):
-        self.allowed_some()
-
-
-class TestSudoAllowedSomePasswd(SudoTests, SudoPasswd):
-
-    USER = 'sudo-allowed-some-pw-user'
-    PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-    @pytest.fixture(scope='class')
-    def create_user(self):
-        with initialize_for_sudo_tests(self.USER,
-                                       self.PASSWORD,
-                                       {'sudo_commands': [ECHO_COMMAND]}) as u:
-            yield u
-
-    def test_audit_query(self, create_user):
-        self.allowed_some()
diff --git a/tests/api2/test_audit_websocket.py b/tests/api2/test_audit_websocket.py
deleted file mode 100644
index a83d0c0120939..0000000000000
--- a/tests/api2/test_audit_websocket.py
+++ /dev/null
@@ -1,452 +0,0 @@
-# -*- coding=utf-8 -*-
-from unittest.mock import ANY
-
-import pytest
-
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.account import unprivileged_user_client, user
-from middlewared.test.integration.assets.api_key import api_key
-from middlewared.test.integration.assets.two_factor_auth import enabled_twofactor_auth, get_user_secret, get_2fa_totp_token
-from middlewared.test.integration.utils import call, client, ssh
-from middlewared.test.integration.utils.audit import expect_audit_log
-
-
-@pytest.fixture(scope='function')
-def sharing_admin_user(unprivileged_user_fixture):
-    privilege = call('privilege.query', [['local_groups.0.group', '=', unprivileged_user_fixture.group_name]])
-    assert len(privilege) > 0, 'Privilege not found'
-    call('privilege.update', privilege[0]['id'], {'roles': ['SHARING_ADMIN']})
-
-    try:
-        yield unprivileged_user_fixture
-    finally:
-        call('privilege.update', privilege[0]['id'], {'roles': []})
-
-
-def test_unauthenticated_call():
-    with client(auth=None) as c:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "WEBSOCKET",
-                    "credentials": None,
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": False,
-                    "authorized": False,
-                    "method": "user.create",
-                    "params": [{"username": "sergey", "full_name": "Sergey"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(CallError):
-                c.call("user.create", {"username": "sergey", "full_name": "Sergey"})
-
-
-def test_unauthorized_call():
-    with unprivileged_user_client() as c:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "WEBSOCKET",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": ANY, "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": False,
-                    "method": "user.create",
-                    "params": [{"username": "sergey", "full_name": "Sergey"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(CallError):
-                c.call("user.create", {"username": "sergey", "full_name": "Sergey"})
-
-
-def test_bogus_call():
-    with client() as c:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "WEBSOCKET",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root", "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": True,
-                    "method": "user.create",
-                    "params": [{}],
-                    "description": "Create user",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(ValidationErrors):
-                c.call("user.create", {})
-
-
-def test_invalid_call():
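-    # The secret password value must show up redacted ('********') in the audited call params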
-    with client() as c:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "WEBSOCKET",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root", "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": True,
-                    "method": "user.create",
-                    "params": [{"username": "sergey", "password": "********"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(ValidationErrors):
-                c.call("user.create", {"username": "sergey", "password": "password"})
-
-
-def test_typo_in_secret_credential_name():
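-    # A misspelled secret field name ('passwrod') must not be logged in plaintext; the unrecognized key is dropped from the audited params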
-    with client() as c:
-        with expect_audit_log([
-            {
-                "service_data": {
-                    "vers": {
-                        "major": 0,
-                        "minor": 1,
-                    },
-                    "origin": ANY,
-                    "protocol": "WEBSOCKET",
-                    "credentials": {
-                        "credentials": "LOGIN_PASSWORD",
-                        "credentials_data": {"username": "root", "login_at": ANY},
-                    },
-                },
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": True,
-                    "method": "user.create",
-                    "params": [{"username": "sergey"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(ValidationErrors):
-                c.call("user.create", {"username": "sergey", "passwrod": "password"})
-
-
-def test_valid_call():
-    with expect_audit_log([
-        {
-            "service_data": {
-                "vers": {
-                    "major": 0,
-                    "minor": 1,
-                },
-                "origin": ANY,
-                "protocol": "WEBSOCKET",
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "root", "login_at": ANY},
-                },
-            },
-            "event": "METHOD_CALL",
-            "event_data": {
-                "authenticated": True,
-                "authorized": True,
-                "method": "user.create",
-                "params": [
-                    {
-                        "username": "sergey",
-                        "full_name": "Sergey",
-                        "group_create": True,
-                        "home": "/nonexistent",
-                        "password": "********",
-                        "home_create": True,
-                    }
-                ],
-                "description": "Create user sergey",
-            },
-            "success": True,
-        },
-        {},
-    ]):
-        with user({
-            "username": "sergey",
-            "full_name": "Sergey",
-            "group_create": True,
-            "home": "/nonexistent",
-            "password": "password",
-        }):
-            pass
-
-
-def test_password_login():
-    with expect_audit_log([
-        {
-            "service_data": {
-                "vers": {
-                    "major": 0,
-                    "minor": 1,
-                },
-                "origin": ANY,
-                "protocol": "WEBSOCKET",
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "root", "login_at": ANY},
-                },
-            },
-            "event": "AUTHENTICATION",
-            "event_data": {
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "root", "login_at": ANY},
-                },
-                "error": None,
-            },
-            "success": True,
-        }
-    ], include_logins=True):
-        with client():
-            pass
-
-
-def test_password_login_failed():
-    with expect_audit_log([
-        {
-            "event": "AUTHENTICATION",
-            "event_data": {
-                "credentials": {
-                    "credentials": "LOGIN_PASSWORD",
-                    "credentials_data": {"username": "invalid"},
-                },
-                "error": "Bad username or password",
-            },
-            "success": False,
-        }
-    ], include_logins=True):
-        with client(auth=("invalid", ""), auth_required=False):
-            pass
-
-
-def test_token_login():
-    token = call("auth.generate_token", 300, {}, True)
-
-    with client(auth=None) as c:
-        with expect_audit_log([
-            {
-                "event": "AUTHENTICATION",
-                "event_data": {
-                    "credentials": {
-                        "credentials": "TOKEN",
-                        "credentials_data": {
-                            "parent": {
-                                "credentials": "LOGIN_PASSWORD",
-                                "credentials_data": {"username": "root", "login_at": ANY},
-                            },
-                            "username": "root",
-                        },
-                    },
-                    "error": None,
-                },
-                "success": True,
-            }
-        ], include_logins=True):
-            assert c.call("auth.login_with_token", token)
-
-
-def test_token_login_failed():
-    with client(auth=None) as c:
-        with expect_audit_log([
-            {
-                "event": "AUTHENTICATION",
-                "event_data": {
-                    "credentials": {
-                        "credentials": "TOKEN",
-                        "credentials_data": {
-                            "token": "invalid_token",
-                        },
-                    },
-                    "error": "Invalid token",
-                },
-                "success": False,
-            }
-        ], include_logins=True):
-            c.call("auth.login_with_token", "invalid_token")
-
-
-def test_token_attributes_login_failed():
-    token = call("auth.generate_token", 300, {"filename": "debug.txz", "job": 1020}, True)
-
-    with client(auth=None) as c:
-        with expect_audit_log([
-            {
-                "event": "AUTHENTICATION",
-                "event_data": {
-                    "credentials": {
-                        "credentials": "TOKEN",
-                        "credentials_data": {
-                            "token": token,
-                        },
-                    },
-                    "error": "Bad token",
-                },
-                "success": False,
-            }
-        ], include_logins=True):
-            c.call("auth.login_with_token", token)
-
-
-def test_api_key_login():
-    with api_key() as key:
-        with client(auth=None) as c:
-            with expect_audit_log([
-                {
-                    "event": "AUTHENTICATION",
-                    "event_data": {
-                        "credentials": {
-                            "credentials": "API_KEY",
-                            "credentials_data": {
-                                "username": "root",
-                                "login_at": ANY,
-                                "api_key": {"id": ANY, "name": ANY},
-                            },
-                        },
-                        "error": None,
-                    },
-                    "success": True,
-                }
-            ], include_logins=True):
-                assert c.call("auth.login_with_api_key", key)
-
-
-def test_api_key_login_failed():
-    with client(auth=None) as c:
-        with expect_audit_log([
-            {
-                "event": "AUTHENTICATION",
-                "event_data": {
-                    "credentials": {
-                        "credentials": "API_KEY",
-                        "credentials_data": {
-                            "api_key": "invalid_api_key",
-                            "username": None
-                        },
-                    },
-                    "error": "Invalid API key",
-                },
-                "success": False,
-            }
-        ], include_logins=True):
-            c.call("auth.login_with_api_key", "invalid_api_key")
-
-
-def test_2fa_login(sharing_admin_user):
-    user_obj_id = call('user.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-
-    with enabled_twofactor_auth():
-        call('user.renew_2fa_secret', sharing_admin_user.username, {'interval': 60})
-        secret = get_user_secret(user_obj_id)
-
-        with client(auth=None) as c:
-            resp = c.call('auth.login_ex', {
-                'mechanism': 'PASSWORD_PLAIN',
-                'username': sharing_admin_user.username,
-                'password': sharing_admin_user.password
-            })
-            assert resp['response_type'] == 'OTP_REQUIRED'
-            assert resp['username'] == sharing_admin_user.username
-
-            # Simulate the user fat-fingering the OTP token and then getting it right on the second attempt
-            otp = get_2fa_totp_token(secret)
-
-            with expect_audit_log([
-                {
-                    "event": "AUTHENTICATION",
-                    "event_data": {
-                        "credentials": {
-                            "credentials": "LOGIN_TWOFACTOR",
-                            "credentials_data": {
-                                "username": sharing_admin_user.username,
-                            },
-                        },
-                        "error": "One-time token validation failed.",
-                    },
-                    "success": False,
-                }
-            ], include_logins=True):
-                resp = c.call('auth.login_ex', {
-                    'mechanism': 'OTP_TOKEN',
-                    'otp_token': 'canary'
-                })
-                assert resp['response_type'] == 'OTP_REQUIRED'
-                assert resp['username'] == sharing_admin_user.username
-
-            with expect_audit_log([
-                {
-                    "event": "AUTHENTICATION",
-                    "event_data": {
-                        "credentials": {
-                            "credentials": "LOGIN_TWOFACTOR",
-                            "credentials_data": {
-                                "username": sharing_admin_user.username,
-                                "login_at": ANY,
-                            },
-                        },
-                        "error": None,
-                    },
-                    "success": True,
-                }
-            ], include_logins=True):
-                resp = c.call('auth.login_ex', {
-                    'mechanism': 'OTP_TOKEN',
-                    'otp_token': otp
-                })
-
-                assert resp['response_type'] == 'SUCCESS'
-
-
-@pytest.mark.parametrize('logfile', ('/var/log/messages', '/var/log/syslog'))
-def test_check_syslog_leak(logfile):
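-    # Audit records carry the '@cee' cookie; none of them should leak into the general syslog files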
-    entries = ssh(f'grep @cee {logfile}', check=False)
-    assert '@cee' not in entries
diff --git a/tests/api2/test_auth_me.py b/tests/api2/test_auth_me.py
deleted file mode 100644
index c5cb7f2d24ba4..0000000000000
--- a/tests/api2/test_auth_me.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.utils import call, client
-
-
-def test_works():
-    user = call("auth.me")
-
-    assert user["pw_uid"] == 0
-    assert user["pw_name"] == "root"
-    assert user['two_factor_config'] is not None
-    assert user['privilege']['webui_access']
-
-
-def test_works_for_token():
-    token = call("auth.generate_token", 300)
-
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", token)
-
-        user = c.call("auth.me")
-
-        assert user["pw_uid"] == 0
-        assert user["pw_name"] == "root"
-        assert user['two_factor_config'] is not None
-        assert 'SYS_ADMIN' in user['account_attributes']
-        assert 'LOCAL' in user['account_attributes']
-
-
-def test_attributes():
-    user = call("auth.me")
-    assert "test" not in user["attributes"]
-
-    call("auth.set_attribute", "test", "value")
-
-    user = call("auth.me")
-    assert user["attributes"]["test"] == "value"
-
-    call("auth.set_attribute", "test", "new_value")
-
-    user = call("auth.me")
-    assert user["attributes"]["test"] == "new_value"
-
-
-def test_distinguishes_attributes():
-    builtin_administrators_group_id = call(
-        "datastore.query",
-        "account.bsdgroups",
-        [["group", "=", "builtin_administrators"]],
-        {"get": True, "prefix": "bsdgrp_"},
-    )["id"]
-
-    with user({
-        "username": "admin",
-        "full_name": "Admin",
-        "group_create": True,
-        "groups": [builtin_administrators_group_id],
-        "home": "/nonexistent",
-        "password": "test1234",
-    }) as admin:
-        with client(auth=("admin", "test1234")) as c:
-            me = c.call("auth.me")
-            assert "test" not in me["attributes"]
-
-            c.call("auth.set_attribute", "test", "value")
-
-            me = c.call("auth.me")
-            assert me["attributes"]["test"] == "value"
-
-            c.call("auth.set_attribute", "test", "new_value")
-
-            me = c.call("auth.me")
-            assert me["attributes"]["test"] == "new_value"
-            assert me['two_factor_config'] is not None
-            assert 'SYS_ADMIN' not in me['account_attributes']
-            assert 'LOCAL' in me['account_attributes']
-            assert me['privilege']['webui_access']
-
-    assert not call("datastore.query", "account.bsdusers_webui_attribute", [["uid", "=", admin["uid"]]])
-
-
-@pytest.mark.parametrize("role,expected",  [
-    (["READONLY_ADMIN", "FILESYSTEM_ATTRS_WRITE"], True),
-    (["READONLY_ADMIN"], True),
-    (["SHARING_ADMIN"], True),
-    (["FILESYSTEM_ATTRS_WRITE"], False)
-])
-def test_webui_access(role, expected):
-    with unprivileged_user_client(roles=role) as c:
-        me = c.call('auth.me')
-        assert me['privilege']['webui_access'] == expected
diff --git a/tests/api2/test_auth_otp.py b/tests/api2/test_auth_otp.py
deleted file mode 100644
index bad920b92863a..0000000000000
--- a/tests/api2/test_auth_otp.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import io
-import json
-
-import pytest
-
-from middlewared.test.integration.utils import call, session, ssh, url
-
-
-@pytest.fixture(scope="module")
-def otp_enabled():
-    call("auth.twofactor.update", {"enabled": True})
-
-    try:
-        yield
-    finally:
-        ssh("midclt call auth.twofactor.update '{\"enabled\": false}'")
-
-
-def test_otp_http_basic_auth(otp_enabled):
-    with session() as s:
-        r = s.put(f"{url()}/api/v2.0/auth/twofactor/", data=json.dumps({"enabled": False}))
-        assert r.status_code == 401
-        assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled"
-
-
-def test_otp_http_basic_auth_upload(otp_enabled):
-    with session() as s:
-        r = s.post(
-            f"{url()}/_upload/",
-            data={
-                "data": json.dumps({
-                    "method": "filesystem.put",
-                    "params": ["/tmp/upload"],
-                })
-            },
-            files={
-                "file": io.BytesIO(b"test"),
-            },
-        )
-        assert r.status_code == 401
-        assert r.text == "HTTP Basic Auth is unavailable when OTP is enabled"
-
-
diff --git a/tests/api2/test_auth_token.py b/tests/api2/test_auth_token.py
deleted file mode 100644
index a4f89b6ab5235..0000000000000
--- a/tests/api2/test_auth_token.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import io
-import json
-
-import pytest
-import requests
-
-from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template
-from middlewared.test.integration.utils import call, client, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.shell import assert_shell_works
-
-
-@pytest.fixture(scope="module")
-def download_token():
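-    # Token restricted to a specific download (filename/job attributes); the tests below verify it cannot be reused for uploads or websocket logins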
-    return call("auth.generate_token", 300, {"filename": "debug.txz", "job": 1020}, True)
-
-
-def test_download_auth_token_cannot_be_used_for_upload(download_token):
-    r = requests.post(
-        f"http://{truenas_server.ip}/_upload",
-        headers={"Authorization": f"Token {download_token}"},
-        data={
-            "data": json.dumps({
-                "method": "filesystem.put",
-                "params": ["/tmp/upload"],
-            })
-        },
-        files={
-            "file": io.BytesIO(b"test"),
-        },
-        timeout=10
-    )
-    assert r.status_code == 403
-
-
-def test_download_auth_token_cannot_be_used_for_websocket_auth(download_token):
-    with client(auth=None) as c:
-        assert not c.call("auth.login_with_token", download_token)
-
-
-@pytest.mark.timeout(30)
-def test_token_created_by_token_can_use_shell():
-    with client() as c:
-        token = c.call("auth.generate_token", 300, {}, True)
-
-        with client(auth=None) as c2:
-            assert c2.call("auth.login_with_token", token)
-
-            token2 = c2.call("auth.generate_token", 300, {}, True)
-            assert_shell_works(token2, "root")
-
-
-@pytest.fixture(scope="module")
-def unprivileged_user():
-    with unprivileged_user_template(
-        username="test",
-        group_name="test",
-        privilege_name="test",
-        allowlist=[{"method": "CALL", "resource": "system.info"}],
-        web_shell=True,
-    ):
-        yield
-
-
-def test_login_with_token_match_origin(unprivileged_user):
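-    # The trailing 'true' argument enables origin matching, so a token generated over SSH (localhost) must not be usable from the remote test client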
-    token = ssh(
-        "sudo -u test midclt -u ws://localhost/api/current -U test -P test1234 call auth.generate_token 300 '{}' true"
-    ).strip()
-
-    with client(auth=None) as c:
-        assert not c.call("auth.login_with_token", token)
-
-
-def test_login_with_token_no_match_origin(unprivileged_user):
-    token = ssh(
-        "sudo -u test midclt -u ws://localhost/api/current -U test -P test1234 call auth.generate_token 300 '{}' false"
-    ).strip()
-
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", token)
-
-
-def test_token_is_for_one_time_use():
-    token = call("auth.generate_token", 300)
-
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", token)
-
-    with client(auth=None) as c:
-        assert not c.call("auth.login_with_token", token)
-
-
-def test_kill_all_tokens_on_session_termination():
-    token = call("auth.generate_token", 300)
-
-    with client(auth=None) as c:
-        assert c.call("auth.login_with_token", token)
-
-        token = c.call("auth.generate_token")
-
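-        # Terminating the parent session must also invalidate tokens generated within it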
-        session = c.call("auth.sessions", [["current", "=", True]], {"get": True})
-        call("auth.terminate_session", session["id"])
-
-        with client(auth=None) as c:
-            assert not c.call("auth.login_with_token", token)
diff --git a/tests/api2/test_authenticator_assurance_level.py b/tests/api2/test_authenticator_assurance_level.py
deleted file mode 100644
index 2ec1a61f64280..0000000000000
--- a/tests/api2/test_authenticator_assurance_level.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import errno
-import pytest
-
-from contextlib import contextmanager
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.two_factor_auth import enabled_twofactor_auth, get_user_secret, get_2fa_totp_token
-from middlewared.test.integration.assets.api_key import api_key
-from middlewared.test.integration.utils import client, call
-
-
-@contextmanager
-def authenticator_assurance_level(level):
-    """ temporarily increase level """
-    with client() as c:
-        c.call('auth.set_authenticator_assurance_level', level)
-        try:
-            yield
-        finally:
-            c.call('auth.set_authenticator_assurance_level', 'LEVEL_1')
-
-
-@pytest.fixture(scope='function')
-def sharing_admin_user(unprivileged_user_fixture):
-    privilege = call('privilege.query', [['local_groups.0.group', '=', unprivileged_user_fixture.group_name]])
-    assert len(privilege) > 0, 'Privilege not found'
-    call('privilege.update', privilege[0]['id'], {'roles': ['SHARING_ADMIN']})
-
-    try:
-        yield unprivileged_user_fixture
-    finally:
-        call('privilege.update', privilege[0]['id'], {'roles': []})
-
-
-@pytest.mark.parametrize('level,expected', [
-    ('LEVEL_1', ['API_KEY_PLAIN', 'TOKEN_PLAIN', 'PASSWORD_PLAIN']),
-    ('LEVEL_2', ['PASSWORD_PLAIN']),
-])
-def test_mechanism_choices(level, expected):
-    with authenticator_assurance_level(level):
-        assert call('auth.mechanism_choices') == expected
-
-
-def test_level2_api_key_plain():
-    """ API_KEY_PLAIN lacks replay resistance
-    and so authentication attempts must fail with EOPNOTSUPP
-    """
-    with authenticator_assurance_level('LEVEL_2'):
-        with api_key() as key:
-            with client(auth=None) as c:
-                with pytest.raises(CallError) as ce:
-                    c.call('auth.login_ex', {
-                        'mechanism': 'API_KEY_PLAIN',
-                        'username': 'root',
-                        'api_key': key
-                    })
-
-                assert ce.value.errno == errno.EOPNOTSUPP
-
-
-def test_level2_password_plain_no_twofactor():
-    """ PASSWORD_PLAIN lacks replay resistance
-    and so authentication attempts must fail with EOPNOTSUPP
-    """
-    with authenticator_assurance_level('LEVEL_2'):
-        with pytest.raises(CallError) as ce:
-            with client():
-                pass
-
-        assert ce.value.errno == errno.EOPNOTSUPP
-
-
-def test_level2_password_with_otp(sharing_admin_user):
-    """ PASSWORD_PLAIN with 2FA is sufficient to authenticate """
-    user_obj_id = call('user.query', [['username', '=', sharing_admin_user.username]], {'get': True})['id']
-
-    with enabled_twofactor_auth():
-        call('user.renew_2fa_secret', sharing_admin_user.username, {'interval': 60})
-        secret = get_user_secret(user_obj_id)
-
-        with authenticator_assurance_level('LEVEL_2'):
-            with client(auth=None) as c:
-                resp = c.call('auth.login_ex', {
-                    'mechanism': 'PASSWORD_PLAIN',
-                    'username': sharing_admin_user.username,
-                    'password': sharing_admin_user.password
-                })
-                assert resp['response_type'] == 'OTP_REQUIRED'
-                assert resp['username'] == sharing_admin_user.username
-
-                resp = c.call('auth.login_ex', {
-                    'mechanism': 'OTP_TOKEN',
-                    'otp_token': get_2fa_totp_token(secret)
-                })
-
-                assert resp['response_type'] == 'SUCCESS'
diff --git a/tests/api2/test_block_hooks.py b/tests/api2/test_block_hooks.py
deleted file mode 100644
index eeb0770190a51..0000000000000
--- a/tests/api2/test_block_hooks.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import uuid
-
-import pytest
-
-from middlewared.test.integration.utils import client, mock
-
-
-@pytest.mark.parametrize("block", [True, False])
-def test_block_hooks(block):
-    hook_name = str(uuid.uuid4())
-
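-    # The mocked method registers a blockable hook that writes a sentinel file; blocking the hook must prevent the write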
-    with mock("test.test1", """
-        async def mock(self, hook_name, blocked_hooks):
-            from pathlib import Path
-
-            sentinel = Path("/tmp/block_hooks_sentinel")
-
-            async def hook(middleware):
-                sentinel.write_text("")
-
-            self.middleware.register_hook(hook_name, hook, blockable=True, sync=True)
-
-            sentinel.unlink(missing_ok=True)
-            with self.middleware.block_hooks(*blocked_hooks):
-                await self.middleware.call_hook(hook_name)
-
-            return sentinel.exists()
-    """):
-        with client() as c:
-            assert c.call("test.test1", hook_name, [hook_name] if block else []) == (not block)
diff --git a/tests/api2/test_boot_attach_replace_detach.py b/tests/api2/test_boot_attach_replace_detach.py
deleted file mode 100644
index 6d9245c84f06d..0000000000000
--- a/tests/api2/test_boot_attach_replace_detach.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call
-from auto_config import ha
-
-if not ha:
-    # The HA VMs only have one extra disk at the time of
-    # writing. QE is aware and is working on adding more
-    # disks to them, so in the meantime we skip this test
-    # since it would always fail on HA VMs.
-
-    @pytest.mark.timeout(600)
-    def test_boot_attach_replace_detach():
-        existing_disks = call("boot.get_disks")
-        assert len(existing_disks) == 1
-
-        unused = call("disk.get_unused")
-        to_attach = unused[0]["name"]
-        replace_with = unused[1]["name"]
-
-        # Attach a disk and wait for resilver to finish
-        call("boot.attach", to_attach, job=True)
-        while True:
-            state = call("boot.get_state")
-            if not (
-                state["scan"] and
-                state["scan"]["function"] == "RESILVER" and
-                state["scan"]["state"] == "SCANNING"
-            ):
-                break
-
-        assert state["topology"]["data"][0]["type"] == "MIRROR"
-
-        assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE"
-
-        to_replace = state["topology"]["data"][0]["children"][1]["name"]
-        assert to_replace.startswith(to_attach)
-        assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE"
-
-        # Replace newly attached disk
-        call("boot.replace", to_replace, replace_with, job=True)
-        # Resilver is a part of replace routine
-        state = call("boot.get_state")
-
-        assert state["topology"]["data"][0]["type"] == "MIRROR"
-
-        assert state["topology"]["data"][0]["children"][0]["status"] == "ONLINE"
-
-        to_detach = state["topology"]["data"][0]["children"][1]["name"]
-        assert to_detach.startswith(replace_with)
-        assert state["topology"]["data"][0]["children"][1]["status"] == "ONLINE"
-
-        # Detach replaced disk, returning the pool to its initial state
-        call("boot.detach", to_detach)
-
-        assert len(call("boot.get_disks")) == 1
diff --git a/tests/api2/test_boot_environments.py b/tests/api2/test_boot_environments.py
deleted file mode 100644
index 7d92ecdc25004..0000000000000
--- a/tests/api2/test_boot_environments.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import contextlib
-
-import pytest
-
-from middlewared.service_exception import ValidationError
-from middlewared.test.integration.utils import call, ssh
-
-TEMP_BE_NAME = "temp_be_name"
-
-
-@pytest.fixture(scope="module")
-def orig_be():
-    return call(
-        "boot.environment.query",
-        [["active", "=", True], ["activated", "=", True]],
-        {"get": True},
-    )
-
-
-def be_query(be_name, get=True):
-    return call("boot.environment.query", [["id", "=", be_name]], {"get": get})
-
-
-@contextlib.contextmanager
-def simulate_can_activate_is_false(be_ds):
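-    # Blanking truenas:kernel_version (setting it to '-') makes the middleware report the BE as not activatable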
-    prop = "truenas:kernel_version"
-    orig_value = ssh(f"zfs get {prop} {be_ds}").strip()
-    assert orig_value
-    try:
-        temp = f"{prop}=-"
-        ssh(f"zfs set {temp!r} {be_ds}")
-        yield
-    finally:
-        orig = f"{prop}={orig_value}"
-        ssh(f"zfs set {orig!r} {be_ds}")
-
-
-def validate_activated_be(be_name, activate_string="R"):
-    """Validate the boot environment shows that it is activated
-    according to OS perspective."""
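-    # In 'zectl list -H' output the second column holds the active flags: 'N' = active now, 'R' = active on reboot, 'NR' = both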
-    for line in ssh("zectl list -H").splitlines():
-        values = line.split()
-        be, activated = values[0], values[1]
-        if be.strip() == be_name and activated.strip() == activate_string:
-            break
-    else:
-        assert False, f"Failed to validate activated BE: {be_name!r}"
-
-
-def get_zfs_property(ds_name, property):
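-    # 'zfs get -H' prints: name, property, value, source; callers read values[2] for the property value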
-    for line in ssh(f"zfs get {property} {ds_name} -H").splitlines():
-        return line.split()
-
-
-def test_failure_conditions_for_activate(orig_be):
-    """
-    1. test activating a non-existent BE fails
-    2. test activating an already activated BE fails
-    3. test destroying the active BE fails
-    """
-    with pytest.raises(ValidationError) as ve:
-        call("boot.environment.activate", {"id": "CANARY"})
-    assert ve.value.attribute == "boot.environment.activate"
-    assert ve.value.errmsg == "'CANARY' not found"
-
-    with pytest.raises(ValidationError) as ve:
-        call("boot.environment.activate", {"id": orig_be["id"]})
-    assert ve.value.attribute == "boot.environment.activate"
-    assert ve.value.errmsg == f"{orig_be['id']!r} is already activated"
-
-    with pytest.raises(ValidationError) as ve:
-        call("boot.environment.destroy", {"id": orig_be["id"]})
-    assert ve.value.attribute == "boot.environment.destroy"
-    assert ve.value.errmsg == "Deleting the active boot environment is not allowed"
-
-
-def test_clone_activate_keep_and_destroy(orig_be):
-    """Perform various boot environment operations via the API
-    and verify functionality works as expected.
-    """
-    tmp = call("boot.environment.clone", {"id": orig_be["id"], "target": TEMP_BE_NAME})
-    assert tmp["id"] == TEMP_BE_NAME
-
-    rv = ssh("zectl list -H").strip()
-    assert TEMP_BE_NAME in rv, rv
-
-    with simulate_can_activate_is_false(tmp["dataset"]):
-        with pytest.raises(ValidationError) as ve:
-            call("boot.environment.activate", {"id": tmp["id"]})
-    assert ve.value.attribute == "boot.environment.activate"
-    assert ve.value.errmsg == f"{tmp['id']!r} can not be activated"
-
-    rv = call("boot.environment.activate", {"id": TEMP_BE_NAME})
-    assert rv["id"] == TEMP_BE_NAME
-    assert rv["activated"] is True
-
-    validate_activated_be(TEMP_BE_NAME)
-
-    rv = call("boot.environment.activate", {"id": orig_be["id"]})
-    assert rv["activated"] is True
-
-    validate_activated_be(orig_be["id"], activate_string="NR")
-
-    rv = call("boot.environment.keep", {"id": orig_be["id"], "value": True})
-    assert rv["keep"] is True
-
-    values = get_zfs_property(orig_be["dataset"], "zectl:keep")
-    assert values[2] == "True"
-
-    rv = call("boot.environment.keep", {"id": orig_be["id"], "value": False})
-    assert rv["keep"] is False
-
-    values = get_zfs_property(orig_be["dataset"], "zectl:keep")
-    assert values[2] == "False"
-
-    rv = call("boot.environment.destroy", {"id": TEMP_BE_NAME})
-    assert rv is None
-
-    rv = call("boot.environment.query", [["id", "=", TEMP_BE_NAME]])
-    assert rv == [], rv
-
-    rv = ssh("zectl list -H").strip()
-    assert TEMP_BE_NAME not in rv, rv
-
-
-def test_promote_current_datasets():
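-    # Make the /var/log dataset a dependent clone by promoting a manual clone of it,
-    # then verify promote_current_datasets restores it as an origin dataset (origin '-')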
-    var_log = ssh("df | grep /var/log").split()[0]
-    snapshot_name = "snap-1"
-    snapshot = f"{var_log}@{snapshot_name}"
-    ssh(f"zfs snapshot {snapshot}")
-    try:
-        clone = "boot-pool/ROOT/clone"
-        ssh(f"zfs clone {snapshot} {clone}")
-        try:
-            ssh(f"zfs promote {clone}")
-            assert (
-                ssh(f"zfs get -H -o value origin {var_log}").strip()
-                == f"{clone}@{snapshot_name}"
-            )
-            call("boot.environment.promote_current_datasets")
-            assert ssh(f"zfs get -H -o value origin {var_log}").strip() == "-"
-        finally:
-            ssh(f"zfs destroy {clone}")
-    finally:
-        ssh(f"zfs destroy {snapshot}")
diff --git a/tests/api2/test_boot_format.py b/tests/api2/test_boot_format.py
deleted file mode 100644
index 8dbb0177ba89a..0000000000000
--- a/tests/api2/test_boot_format.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_optimal_disk_usage():
-    disk = call('disk.get_unused')[0]
-    data_size = (
-        disk['size'] -
-        1 * 1024 * 1024 -  # BIOS boot
-        512 * 1024 * 1024 -  # EFI
-        73 * 512  # GPT + alignment
-    )
-    # Will raise an exception if we fail to format the disk with given harsh restrictions
-    call('boot.format', disk['name'], {'size': data_size})
diff --git a/tests/api2/test_boot_scrub.py b/tests/api2/test_boot_scrub.py
deleted file mode 100644
index babcce79ef507..0000000000000
--- a/tests/api2/test_boot_scrub.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from time import time, sleep
-
-from middlewared.test.integration.utils import call
-
-
-def test_get_boot_scrub(request):
-    job_id = call("boot.scrub")
-    stop_time = time() + 600
-    while True:
-        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-        if job["state"] in ("RUNNING", "WAITING"):
-            if stop_time <= time():
-                assert False, "Job Timeout\n\n" + job
-                break
-            sleep(1)
-        else:
-            assert job["state"] == "SUCCESS", job
-            break
diff --git a/tests/api2/test_can_access_as_user.py b/tests/api2/test_can_access_as_user.py
deleted file mode 100644
index a457988266de2..0000000000000
--- a/tests/api2/test_can_access_as_user.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import contextlib
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset, pool
-from middlewared.test.integration.utils import call, ssh
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-@contextlib.contextmanager
-def file(name, user, group, permissions):
-    with dataset('test_perms', pool=pool) as test_dataset:
-        path = os.path.join('/mnt', test_dataset, name)
-        with file_at_path(path, user, group, permissions):
-            yield path
-
-
-@contextlib.contextmanager
-def file_at_path(path, user, group, permissions):
-    ssh(f'install -o {user} -g {group} -m {permissions} /dev/null {path}')
-    try:
-        yield path
-    finally:
-        ssh(f'rm -f {path}')
-
-
-@contextlib.contextmanager
-def directory(name, user, group, permissions):
-    with dataset('test_perms', pool=pool) as test_dataset:
-        path = os.path.join('/mnt', test_dataset, name)
-        ssh(f'mkdir -p -m {permissions} {path}')
-        ssh(f'chown -R {user}:{group} {path}')
-
-        try:
-            yield path
-        finally:
-            ssh(f'rm -rf {path}')
-
-
-def test_non_authorized_user_access():
-    with file('test', 'root', 'root', '700') as file_path:
-        for perm_check in ('read', 'write', 'execute'):
-            assert call('filesystem.can_access_as_user', 'nobody', file_path, {perm_check: True}) is False
-
-
-def test_authorized_user_access():
-    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
-        with file('test', user, group, '700') as file_path:
-            for perm_check in ('read', 'write', 'execute'):
-                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is True
-
-
-def test_read_access():
-    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
-        with file('test', user, group, '400') as file_path:
-            for perm_check, value in (('read', True), ('write', False), ('execute', False)):
-                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
-
-
-def test_write_access():
-    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
-        with file('test', user, group, '200') as file_path:
-            for perm_check, value in (('read', False), ('write', True), ('execute', False)):
-                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
-
-
-def test_execute_access():
-    for user, group in (('apps', 'apps'), ('nobody', 'nogroup')):
-        with file('test', user, group, '100') as file_path:
-            for perm_check, value in (('read', False), ('write', False), ('execute', True)):
-                assert call('filesystem.can_access_as_user', user, file_path, {perm_check: True}) is value
-
-
-def test_nested_perm_execute_check():
-    with directory('test_dir', 'root', 'root', '700') as dir_path:
-        file_path = os.path.join(dir_path, 'testfile')
-        with file_at_path(file_path, 'root', 'root', '777'):
-            assert call('filesystem.can_access_as_user', 'apps', file_path, {'execute': True}) is False
diff --git a/tests/api2/test_catalog_roles.py b/tests/api2/test_catalog_roles.py
deleted file mode 100644
index 4233c88b353eb..0000000000000
--- a/tests/api2/test_catalog_roles.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
-    ('catalog.get_app_details', 'CATALOG_READ', True, True),
-    ('catalog.get_app_details', 'CATALOG_WRITE', True, True),
-    ('catalog.get_app_details', 'DOCKER_READ', False, False),
-    ('app.latest', 'CATALOG_READ', True, False),
-    ('app.latest', 'CATALOG_WRITE', True, False),
-    ('app.latest', 'APPS_WRITE', True, False),
-    ('app.available', 'CATALOG_READ', True, False),
-    ('app.available', 'CATALOG_WRITE', True, False),
-    ('app.available', 'APPS_WRITE', True, False),
-    ('app.categories', 'CATALOG_READ', True, False),
-    ('app.categories', 'CATALOG_WRITE', True, False),
-    ('app.categories', 'APPS_WRITE', True, False),
-    ('app.similar', 'CATALOG_READ', True, True),
-    ('app.similar', 'CATALOG_WRITE', True, True),
-    ('app.similar', 'APPS_WRITE', True, True),
-    ('catalog.apps', 'CATALOG_READ', True, False),
-    ('catalog.apps', 'CATALOG_WRITE', True, False),
-    ('catalog.apps', 'DOCKER_READ', False, False),
-    ('catalog.sync', 'CATALOG_READ', False, False),
-    ('catalog.sync', 'CATALOG_WRITE', True, False),
-    ('catalog.update', 'CATALOG_READ', False, True),
-    ('catalog.update', 'CATALOG_WRITE', True, True),
-
-))
-def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
diff --git a/tests/api2/test_catalogs.py b/tests/api2/test_catalogs.py
deleted file mode 100644
index 4c0e993daf7c9..0000000000000
--- a/tests/api2/test_catalogs.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import os.path
-
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.docker import IX_APPS_CATALOG_PATH
-
-
-@pytest.fixture(scope='module')
-def docker_pool(request):
-    with another_pool() as pool:
-        yield pool['name']
-
-
-@pytest.mark.dependency(name='unconfigure_apps')
-def test_unconfigure_apps():
-    config = call('docker.update', {'pool': None}, job=True)
-    assert config['pool'] is None, config
-
-
-@pytest.mark.dependency(depends=['unconfigure_apps'])
-def test_catalog_sync():
-    call('catalog.sync', job=True)
-    assert call('catalog.synced') is True
-
-
-@pytest.mark.dependency(depends=['unconfigure_apps'])
-def test_catalog_cloned_location():
-    config = call('catalog.config')
-    assert config['location'] == '/var/run/middleware/ix-apps/catalogs', config
-
-
-@pytest.mark.dependency(depends=['unconfigure_apps'])
-def test_apps_are_being_reported():
-    assert call('app.available', [], {'count': True}) != 0
-
-
-@pytest.mark.dependency(name='docker_setup')
-def test_docker_setup(docker_pool):
-    config = call('docker.update', {'pool': docker_pool}, job=True)
-    assert config['pool'] == docker_pool, config
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_catalog_synced_properly():
-    assert call('catalog.synced') is True
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_catalog_sync_location():
-    assert call('catalog.config')['location'] == IX_APPS_CATALOG_PATH
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_catalog_location_existence():
-    docker_config = call('docker.config')
-    assert docker_config['pool'] is not None
-
-    assert call('filesystem.statfs', IX_APPS_CATALOG_PATH)['source'] == os.path.join(
-        docker_config['dataset'], 'truenas_catalog'
-    )
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_apps_are_being_reported_after_docker_setup():
-    assert call('app.available', [], {'count': True}) != 0
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_categories_are_being_reported():
-    assert len(call('app.categories')) != 0
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_app_version_details():
-    app_details = call('catalog.get_app_details', 'plex', {'train': 'stable'})
-    assert app_details['name'] == 'plex', app_details
-
-    assert len(app_details['versions']) != 0, app_details
-
-
-@pytest.mark.dependency(depends=['docker_setup'])
-def test_unconfigure_apps_after_setup():
-    config = call('docker.update', {'pool': None}, job=True)
-    assert config['pool'] is None, config
diff --git a/tests/api2/test_certificate_roles.py b/tests/api2/test_certificate_roles.py
deleted file mode 100644
index 94144e1bb4d91..0000000000000
--- a/tests/api2/test_certificate_roles.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, role, valid_role', (
-    ('certificate.profiles', 'CERTIFICATE_READ', True),
-    ('certificateauthority.profiles', 'CERTIFICATE_AUTHORITY_READ', True),
-    ('certificate.profiles', 'CERTIFICATE_AUTHORITY_READ', False),
-    ('certificateauthority.profiles', 'CERTIFICATE_READ', False),
-))
-def test_profiles_read_roles(unprivileged_user_fixture, method, role, valid_role):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=False)
-
-
-@pytest.mark.parametrize('role, valid_role', (
-    ('CERTIFICATE_AUTHORITY_WRITE', True),
-    ('CERTIFICATE_AUTHORITY_READ', False),
-))
-def test_certificate_authority_create_role(unprivileged_user_fixture, role, valid_role):
-    common_checks(unprivileged_user_fixture, 'certificateauthority.create', role, valid_role, method_args=[{}])
-
-
-@pytest.mark.parametrize('role, valid_role', (
-    ('CERTIFICATE_WRITE', True),
-    ('CERTIFICATE_READ', False),
-))
-def test_certificate_create_role(unprivileged_user_fixture, role, valid_role):
-    common_checks(unprivileged_user_fixture, 'certificate.create', role, valid_role, method_args=[], method_kwargs={'job': True})
-
-
-@pytest.mark.parametrize('role, valid_role', (
-    ('CERTIFICATE_AUTHORITY_WRITE', True),
-    ('CERTIFICATE_AUTHORITY_READ', False),
-))
-def test_signing_csr_role(unprivileged_user_fixture, role, valid_role):
-    common_checks(unprivileged_user_fixture, 'certificateauthority.ca_sign_csr', role, valid_role, method_args=[{
-        'ca_id': 1,
-        'csr_cert_id': 1,
-        'name': 'test_csr_signing_role',
-    }])
diff --git a/tests/api2/test_certs.py b/tests/api2/test_certs.py
deleted file mode 100644
index 7a742a9080c89..0000000000000
--- a/tests/api2/test_certs.py
+++ /dev/null
@@ -1,652 +0,0 @@
-import datetime
-import os.path
-import textwrap
-
-import cryptography
-import pytest
-from cryptography import x509
-from cryptography.hazmat.backends import default_backend
-from cryptography.hazmat.primitives import serialization, hashes
-from cryptography.hazmat.primitives.asymmetric import rsa
-from cryptography.x509 import Name, NameAttribute, CertificateBuilder, BasicConstraints
-
-from middlewared.test.integration.assets.crypto import (
-    certificate_signing_request, get_cert_params, intermediate_certificate_authority, root_certificate_authority
-)
-from middlewared.test.integration.utils import call
-from truenas_api_client import ValidationErrors
-
-
-# We would like to test the following cases
-# Creating root CA
-# Creating intermediate CA
-# Importing CA
-# Creating certificate from root/intermediate CAs
-# Create CSR
-# Signing CSR
-
-def test_creating_root_ca():
-    root_ca = call('certificateauthority.create', {
-        **get_cert_params(),
-        'name': 'test_root_ca',
-        'create_type': 'CA_CREATE_INTERNAL',
-    })
-    try:
-        assert root_ca['CA_type_internal'] is True, root_ca
-    finally:
-        call('certificateauthority.delete', root_ca['id'])
-
-
-def test_root_ca_issuer_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        assert root_ca['issuer'] == 'self-signed', root_ca
-
-
-def test_creating_intermediate_ca():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        intermediate_ca = call('certificateauthority.create', {
-            **get_cert_params(),
-            'signedby': root_ca['id'],
-            'name': 'test_intermediate_ca',
-            'create_type': 'CA_CREATE_INTERMEDIATE',
-        })
-        try:
-            assert intermediate_ca['CA_type_intermediate'] is True, intermediate_ca
-        finally:
-            call('certificateauthority.delete', intermediate_ca['id'])
-
-
-def test_ca_intermediate_issuer_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        intermediate_ca = call('certificateauthority.create', {
-            **get_cert_params(),
-            'signedby': root_ca['id'],
-            'name': 'test_intermediate_ca',
-            'create_type': 'CA_CREATE_INTERMEDIATE',
-        })
-        root_ca = call('certificateauthority.get_instance', root_ca['id'])
-        try:
-            assert intermediate_ca['issuer'] == root_ca, intermediate_ca
-        finally:
-            call('certificateauthority.delete', intermediate_ca['id'])
-
-
-def test_cert_chain_of_intermediate_ca_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        intermediate_ca = call('certificateauthority.create', {
-            **get_cert_params(),
-            'signedby': root_ca['id'],
-            'name': 'test_intermediate_ca',
-            'create_type': 'CA_CREATE_INTERMEDIATE',
-        })
-        try:
-            assert intermediate_ca['chain_list'] == [
-                intermediate_ca['certificate'], root_ca['certificate']
-            ], intermediate_ca
-        finally:
-            call('certificateauthority.delete', intermediate_ca['id'])
-
-
-def test_importing_ca():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        imported_ca = call('certificateauthority.create', {
-            'certificate': root_ca['certificate'],
-            'privatekey': root_ca['privatekey'],
-            'name': 'test_imported_ca',
-            'create_type': 'CA_CREATE_IMPORTED',
-        })
-        try:
-            assert imported_ca['CA_type_existing'] is True, imported_ca
-        finally:
-            call('certificateauthority.delete', imported_ca['id'])
-
-
-def test_ca_imported_issuer_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        imported_ca = call('certificateauthority.create', {
-            'certificate': root_ca['certificate'],
-            'privatekey': root_ca['privatekey'],
-            'name': 'test_imported_ca',
-            'create_type': 'CA_CREATE_IMPORTED',
-        })
-        try:
-            assert imported_ca['issuer'] == 'external', imported_ca
-        finally:
-            call('certificateauthority.delete', imported_ca['id'])
-
-
-def test_ca_imported_add_to_trusted_store_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        imported_ca = call('certificateauthority.create', {
-            'certificate': root_ca['certificate'],
-            'privatekey': root_ca['privatekey'],
-            'name': 'test_tinkerbell',
-            'add_to_trusted_store': True,
-            'create_type': 'CA_CREATE_IMPORTED',
-        })
-        try:
-            assert imported_ca['add_to_trusted_store'] is True, imported_ca
-        finally:
-            call('certificateauthority.delete', imported_ca['id'])
-
-
-def test_creating_cert_from_root_ca():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': root_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['cert_type_internal'] is True, cert
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_cert_chain_of_root_ca_reported_correctly():
-    with root_certificate_authority('root_ca_test') as root_ca:
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': root_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['chain_list'] == [cert['certificate'], root_ca['certificate']], cert
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_creating_cert_from_intermediate_ca():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['cert_type_internal'] is True, cert
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_cert_chain_reported_correctly():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['chain_list'] == [
-                cert['certificate'], intermediate_ca['certificate'], root_ca['certificate']
-            ], cert
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_cert_issuer_reported_correctly():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        intermediate_ca = call('certificateauthority.get_instance', intermediate_ca['id'])
-        try:
-            assert cert['issuer'] == intermediate_ca, cert
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-@pytest.mark.parametrize('add_to_trusted_store_enabled', [
-    True,
-    False,
-])
-def test_cert_add_to_trusted_store(add_to_trusted_store_enabled):
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_trusted_store_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            'add_to_trusted_store': add_to_trusted_store_enabled,
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['add_to_trusted_store'] == add_to_trusted_store_enabled
-            args = ['filesystem.stat', os.path.join('/var/local/ca-certificates', f'cert_{cert["name"]}.crt')]
-            if add_to_trusted_store_enabled:
-                assert call(*args)
-            else:
-                with pytest.raises(Exception):
-                    call(*args)
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_creating_csr():
-    with certificate_signing_request('csr_test') as csr:
-        assert csr['cert_type_CSR'] is True, csr
-
-
-def test_issuer_of_csr():
-    with certificate_signing_request('csr_test') as csr:
-        assert csr['issuer'] == 'external - signature pending', csr
-
-
-def test_signing_csr():
-    with root_certificate_authority('root_ca') as root_ca:
-        with certificate_signing_request('csr_test') as csr:
-            cert = call('certificateauthority.ca_sign_csr', {
-                'ca_id': root_ca['id'],
-                'csr_cert_id': csr['id'],
-                'name': 'signed_cert',
-            })
-            root_ca = call('certificateauthority.get_instance', root_ca['id'])
-            try:
-                assert isinstance(cert['signedby'], dict), cert
-                assert cert['signedby']['id'] == root_ca['id'], cert
-                assert cert['chain_list'] == [cert['certificate'], root_ca['certificate']]
-                assert cert['issuer'] == root_ca, cert
-            finally:
-                call('certificate.delete', cert['id'], job=True)
-
-
-def test_sign_csr_with_imported_ca():
-    # Creating Root CA
-    private_key = rsa.generate_private_key(
-        public_exponent=65537,
-        key_size=2048,
-        backend=default_backend()
-    )
-
-    # Serialize the private key
-    private_key_bytes = private_key.private_bytes(
-        encoding=serialization.Encoding.PEM,
-        format=serialization.PrivateFormat.TraditionalOpenSSL,
-        encryption_algorithm=serialization.NoEncryption()  # No encryption for test purposes
-    )
-
-    # Create a self-signed certificate
-    subject = Name([
-        NameAttribute(cryptography.x509.NameOID.COUNTRY_NAME, u'US'),
-        NameAttribute(cryptography.x509.NameOID.STATE_OR_PROVINCE_NAME, u'Ohio'),
-        NameAttribute(cryptography.x509.NameOID.LOCALITY_NAME, u'Texas'),
-        NameAttribute(cryptography.x509.NameOID.ORGANIZATION_NAME, u'MyCA'),
-        NameAttribute(cryptography.x509.NameOID.COMMON_NAME, u'MyCA'),
-    ])
-    issuer = subject
-
-    cert = (
-        CertificateBuilder()
-        .subject_name(subject)
-        .issuer_name(issuer)
-        .not_valid_before(datetime.datetime.utcnow())
-        .not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=3650))
-        .serial_number(x509.random_serial_number())
-        .public_key(private_key.public_key())
-        .add_extension(BasicConstraints(ca=True, path_length=None), critical=True)
-        .sign(private_key, hashes.SHA256(), default_backend())
-    )
-
-    # Serialize the certificate
-    cert_bytes = cert.public_bytes(serialization.Encoding.PEM)
-
-    imported_root_ca = call('certificateauthority.create', {
-        'certificate': cert_bytes.decode('utf-8'),
-        'privatekey': private_key_bytes.decode('utf-8'),
-        'name': 'test_imported_root_ca',
-        'create_type': 'CA_CREATE_IMPORTED',
-    })
-
-    with certificate_signing_request('csr_test') as csr:
-        cert = call('certificateauthority.ca_sign_csr', {
-            'ca_id': imported_root_ca['id'],
-            'csr_cert_id': csr['id'],
-            'name': 'inter_signed_csr'
-        })
-
-        cert_pem_data = cert['certificate'].encode()
-        cert_data = x509.load_pem_x509_certificate(cert_pem_data, default_backend())
-        cert_issuer = cert_data.issuer
-        ca_pem_data = imported_root_ca['certificate'].encode()
-        ca_data = x509.load_pem_x509_certificate(ca_pem_data, default_backend())
-        ca_subject = ca_data.subject
-        imported_root_ca = call('certificateauthority.get_instance', imported_root_ca['id'])
-
-        try:
-            assert imported_root_ca['CA_type_existing'] is True, imported_root_ca
-            assert isinstance(cert['signedby'], dict), cert
-            assert cert['signedby']['id'] == imported_root_ca['id'], cert
-            assert cert['chain_list'] == [cert['certificate'], imported_root_ca['certificate']], cert
-            assert cert['issuer'] == imported_root_ca, cert
-            assert cert_issuer == ca_subject
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-            call('certificateauthority.delete', imported_root_ca['id'])
-
-
-def test_revoking_cert():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert cert['can_be_revoked'] is True, cert
-            cert = call('certificate.update', cert['id'], {'revoked': True}, job=True)
-            assert cert['revoked'] is True, cert
-
-            root_ca = call('certificateauthority.get_instance', root_ca['id'])
-            intermediate_ca = call('certificateauthority.get_instance', intermediate_ca['id'])
-
-            assert len(root_ca['revoked_certs']) == 1, root_ca
-            assert len(intermediate_ca['revoked_certs']) == 1, intermediate_ca
-
-            assert root_ca['revoked_certs'][0]['certificate'] == cert['certificate'], root_ca
-            assert intermediate_ca['revoked_certs'][0]['certificate'] == cert['certificate'], intermediate_ca
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_revoking_ca():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        cert = call('certificate.create', {
-            'name': 'cert_test',
-            'signedby': intermediate_ca['id'],
-            'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-            **get_cert_params(),
-        }, job=True)
-        try:
-            assert intermediate_ca['can_be_revoked'] is True, intermediate_ca
-            intermediate_ca = call('certificateauthority.update', intermediate_ca['id'], {'revoked': True})
-            assert intermediate_ca['revoked'] is True, intermediate_ca
-
-            cert = call('certificate.get_instance', cert['id'])
-            assert cert['revoked'] is True, cert
-
-            root_ca = call('certificateauthority.get_instance', root_ca['id'])
-            assert len(root_ca['revoked_certs']) == 2, root_ca
-            assert len(intermediate_ca['revoked_certs']) == 2, intermediate_ca
-
-            check_set = {intermediate_ca['certificate'], cert['certificate']}
-            assert set(c['certificate'] for c in intermediate_ca['revoked_certs']) == check_set, intermediate_ca
-            assert set(c['certificate'] for c in root_ca['revoked_certs']) == check_set, root_ca
-        finally:
-            call('certificate.delete', cert['id'], job=True)
-
-
-def test_created_certs_exist_on_filesystem():
-    with intermediate_certificate_authority('root_ca', 'intermediate_ca') as (root_ca, intermediate_ca):
-        with certificate_signing_request('csr_test') as csr:
-            cert = call('certificate.create', {
-                'name': 'cert_test',
-                'signedby': intermediate_ca['id'],
-                'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-                **get_cert_params(),
-            }, job=True)
-            try:
-                assert get_cert_current_files() == get_cert_expected_files()
-            finally:
-                call('certificate.delete', cert['id'], job=True)
-
-
-def test_deleted_certs_dont_exist_on_filesystem():
-    with intermediate_certificate_authority('root_ca2', 'intermediate_ca2') as (root_ca2, intermediate_ca2):
-        # no-op
-        pass
-    with certificate_signing_request('csr_test2') as csr2:
-        pass
-    assert get_cert_current_files() == get_cert_expected_files()
-
-
-def get_cert_expected_files():
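-    # Build the set of on-disk file paths that should exist for every certificate and CA known to the middleware.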
-    certs = call('certificate.query')
-    cas = call('certificateauthority.query')
-    expected_files = {'/etc/certificates/CA'}
-    for cert in certs + cas:
-        if cert['chain_list']:
-            expected_files.add(cert['certificate_path'])
-        if cert['privatekey']:
-            expected_files.add(cert['privatekey_path'])
-        if cert['cert_type_CSR']:
-            expected_files.add(cert['csr_path'])
-        if any(cert[k] for k in ('CA_type_existing', 'CA_type_internal', 'CA_type_intermediate')):
-            expected_files.add(cert['crl_path'])
-    return expected_files
-
-
-def get_cert_current_files():
-    return {
-        f['path']
-        for p in ('/etc/certificates', '/etc/certificates/CA') for f in call('filesystem.listdir', p)
-    }
-
-
-@pytest.mark.parametrize('life_time,should_work', [
-    (300, True),
-    (9999999, False),
-])
-def test_certificate_lifetime_validation(life_time, should_work):
-    cert_params = get_cert_params()
-    cert_params['lifetime'] = life_time
-    with root_certificate_authority('root-ca') as root_ca:
-        if should_work:
-            cert = None
-            try:
-                cert = call(
-                    'certificate.create', {
-                        'name': 'test-cert',
-                        'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-                        'signedby': root_ca['id'],
-                        **cert_params,
-                    }, job=True
-                )
-                assert cert['parsed'] is True, cert
-            finally:
-                if cert:
-                    call('certificate.delete', cert['id'], job=True)
-        else:
-            with pytest.raises(ValidationErrors):
-                call(
-                    'certificate.create', {
-                        'name': 'test-cert',
-                        'signedby': root_ca['id'],
-                        'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-                        **cert_params,
-                    }, job=True
-                )
-
-
-@pytest.mark.parametrize('certificate,private_key,should_work', [
-    (
-        textwrap.dedent('''\
-            -----BEGIN CERTIFICATE-----
-            MIIEDTCCAvWgAwIBAgIEAKWUWTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJV
-            UzEMMAoGA1UECAwDdXNhMRMwEQYDVQQHDApjYWxpZm9ybmlhMQswCQYDVQQKDAJs
-            bTEWMBQGCSqGSIb3DQEJARYHYUBiLmNvbTAeFw0yMzA0MDYxNjQyMTJaFw0yNDA1
-            MDcxNjQyMTJaME4xCzAJBgNVBAYTAlVTMQwwCgYDVQQIDAN1c2ExDDAKBgNVBAcM
-            A3VzYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYy5jb20wggEiMA0G
-            CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtvPEA2x3/jp0riSdgb7TqB9uAobzt
-            tYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGcY4G2J+RgQNR8b8lk
-            m38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7q2LmE6wVIVjWqTSi
-            Yo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGeTLEW5wiSYO8QMD0r
-            JHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7dkLu2tawHE8DD97O5
-            zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHBwFtqQaBlAgMBAAGj
-            geswgegwDgYDVR0RBAcwBYIDYWJjMB0GA1UdDgQWBBSRzlS66ts6rhuCN+4VK2x7
-            8E+n1zAMBgNVHRMBAf8EAjAAMIGABgNVHSMEeTB3gBR1fZ31S5XHrijsT/C9fzbB
-            aqrg5qFZpFcwVTELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA3VzYTETMBEGA1UEBwwK
-            Y2FsaWZvcm5pYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYi5jb22C
-            BACllFgwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwDgYDVR0PAQH/BAQDAgOIMA0G
-            CSqGSIb3DQEBCwUAA4IBAQA7UwYNr6gspgRcCGwzl5RUAL/N3NXv3rcgTPF405s5
-            OXKDPAxWSulzt/jqAesYvI27koOsGj0sDsSRLRdmj4HG91Xantnv5rxGqdYHEDPo
-            j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng
-            2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P
-            uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br
-            4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM
-            -----END CERTIFICATE-----
-        '''),
-        textwrap.dedent('''\
-            -----BEGIN PRIVATE KEY-----
-            MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCtvPEA2x3/jp0r
-            iSdgb7TqB9uAobzttYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGc
-            Y4G2J+RgQNR8b8lkm38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7
-            q2LmE6wVIVjWqTSiYo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGe
-            TLEW5wiSYO8QMD0rJHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7d
-            kLu2tawHE8DD97O5zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHB
-            wFtqQaBlAgMBAAECggEAFNc827rtIspDPUUzFYTg4U/2+zurk6I6Xg+pMmjnXiUV
-            HZchFz2lngYfHkD+krnZNSBuvGR1CHhOdOmU1jp70TYFpzWrpWdnvs5qcsWZ/1Tt
-            Vi4tcLsTkloC2+QGPFTiFtD3EuXGxhuTecvJzcqfUluRMhLTDwWegFvBvIVdSVeZ
-            9XFDZF9O748tdt2PhYcL2L/xDz4sIz89ek4P1v4raB52rcleIduqMat29crVR3ex
-            VsZK3PLW6HCquUQvdvjLblfzjDS1pqcpIiSsYCrP0eEEKrrg44V8VjcPxXIg4GAE
-            ioDOpi9vO/3xyxYxXBtlD2o6c9kZUrp+xxx9jztdIQKBgQDo8witC33Z7Rd6dLm9
-            zgN/wZ2lWqE927fXZBExKjCXZ+A3N58One0TR2qI9S+BRVc2KOCWFGUjnHbx1PfE
-            xU1UNDY+ir9Lqk+rzhyEk4vst/IwhyovmAhL5fONqlfxB+l29cUh6JIYMtqaWYvj
-            AbmS5YhZRMa3kI/BtCTRJtPecQKBgQC+7f57XWt7HNe7FvrDTz5M8AmQ7y487NxZ
-            OcZ1+YKJ57PVY7G7Ye3xqRTd05L6h1P1eCO0gLDiSy5VOz47uFdNcD/9Ia+Ng2oq
-            P8TC36b86dz3ZDhBm4AB3shaD/JBjUQ0NwLosmrMaDF+lVu8NPA60eeQ70/RgbSA
-            KNrOUH1DNQKBgQDicOzsGZGat6fs925enNY16CWwSOsYUG7ix3kWy6Y0Z1tDEaRh
-            9w4vgWqD+6LUDG18TjwSZ3zxIvVUmurGsew7gA2Cuii+Cq4rmc2K6kpIL38TwTA2
-            15io/rzD5uRZfpFpe/rGvWbWcwigpY8fedvEea8S55IrejDj4JMxZIbrYQKBgQCG
-            Ke68+XRhWm8thIRJYhHBNptCQRAYt8hO2o5esCnOhgaUWC24IqR1P/7tsZKCgT26
-            K+XLHPMu0O2J7stYY7zVKZ+NXHJj2ohrj8vPtCE/b4ZaQQ5W69ITfl0DDFmLPp1C
-            o7Vjlpv9bun4rTN9GSYF7yHtcnyAF8iilhLLDzw2UQKBgQC4FzI6/P2HcUNzf+/m
-            AThk8+4V35gOSxn3uk48CXNStcCoLMEeXM69SGYq8GaGU/piaog9D8RvF4yMAnnL
-            wNpy8J/4ldluyidX61N0dMS+NL4l4TPjTvOY22KzjwfnBoqzg+93Mt//M4HfR/ka
-            3EWl5VmzbuEeytrcH3uHAUpkKg==
-            -----END PRIVATE KEY-----
-        '''),
-        True,
-    ),
-    (
-        textwrap.dedent('''\
-           -----BEGIN CERTIFICATE-----
-           MIIEDTCCAvWgAwIBAgIEAKWUWTANBgkqhkiG9w0BAQsFADBVMQswCQYDVQQGEwJV
-           UzEMMAoGA1UECAwDdXNhMRMwEQYDVQQHDApjYWxpZm9ybmlhMQswCQYDVQQKDAJs
-           bTEWMBQGCSqGSIb3DQEJARYHYUBiLmNvbTAeFw0yMzA0MDYxNjQyMTJaFw0yNDA1
-           MDcxNjQyMTJaME4xCzAJBgNVBAYTAlVTMQwwCgYDVQQIDAN1c2ExDDAKBgNVBAcM
-           A3VzYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYy5jb20wggEiMA0G
-           CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtvPEA2x3/jp0riSdgb7TqB9uAobzt
-           tYbW9E0+WLqf3sLJJ4F4Iq0AI1YYMtOOwcjmvC52eSaqxoGcY4G2J+RgQNR8b8lk
-           m38vRYQA2SkDCtEQFkLiCrkr5g20xh89gCLEr9c5x45p8Pl7q2LmE6wVIVjWqTSi
-           Yo4TMD8Nb5LN3vPeM7+fwV7FZDH7PJ4AT1/kTJjhkK0wiOGeTLEW5wiSYO8QMD0r
-           JHfzAp8UPFsVK8InZTjLS4VJgI0OlG2Von7Nv7Wtxsg5hi7dkLu2tawHE8DD97O5
-           zhVTZHzBiDF1mrjR3+6RWgn8iF6353UV9hbyPYz51UiCEYHBwFtqQaBlAgMBAAGj
-           geswgegwDgYDVR0RBAcwBYIDYWJjMB0GA1UdDgQWBBSRzlS66ts6rhuCN+4VK2x7
-           8E+n1zAMBgNVHRMBAf8EAjAAMIGABgNVHSMEeTB3gBR1fZ31S5XHrijsT/C9fzbB
-           aqrg5qFZpFcwVTELMAkGA1UEBhMCVVMxDDAKBgNVBAgMA3VzYTETMBEGA1UEBwwK
-           Y2FsaWZvcm5pYTELMAkGA1UECgwCbG0xFjAUBgkqhkiG9w0BCQEWB2FAYi5jb22C
-           BACllFgwFgYDVR0lAQH/BAwwCgYIKwYBBQUHAwIwDgYDVR0PAQH/BAQDAgOIMA0G
-           CSqGSIb3DQEBCwUAA4IBAQA7UwYNr6gspgRcCGwzl5RUAL/N3NXv3rcgTPF405s5
-           OXKDPAxWSulzt/jqAesYvI27koOsGj0sDsSRLRdmj4HG91Xantnv5rxGqdYHEDPo
-           j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng
-           2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P
-           uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br
-           4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM
-           -----END CERTIFICATE-----
-        '''),
-        textwrap.dedent('''\
-            -----BEGIN PRIVATE KEY-----
-            MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDVMPccUqq6jd8h
-            h0ybrwRkvK+pvOJze00IK7F6A8RRyCwDL2Yc0GpWR5ecY+jBiZ1n+TfKfaybdKR0
-            0hhFFuU74JTsUk298hI1GVBNvwbimgraQciWjg0wDjHAN7AFZL8Jb/Tn7/DZlmn+
-            TgqdPaFIeD4XnLX6zwrc4VemKYDDcdr5JyDVCt3ZtqTEbbtxQ4WvZbtCxlzlkyJu
-            xwdmGyCvjkQri55+FaejvnPCUzJSOK28jShBuZCIS3lR7HCcAS4cc05TTrWSZr+i
-            brLISVEz1XASc0pKz8QGMuz5Hk5uNRLl4JGmWZrSV9lqtFYP9hatpLi5mnhWpgYi
-            Q0IXvNUXAgMBAAECggEAdbgf+0e6dmC4gO8Q4jZ2GpoF9ZgTAulm08gsq89ArFf3
-            1ZpqrCZ5UUMe+IBCmfu/KxZ2NB3JHd3+oXMRa7UEx1dvZD7eJrBwVVmw+f0tdBrT
-            O0lv1ZKCvbJYzmbxj0jeI/vqI9heCggAZyf4vHK3iCi9QJSL9/4zZVwY5eus6j4G
-            RCMXW8ZqiKX3GLtCjPmZilYQHNDbsfAbqy75AsG81fgaKkYkJS29rte9R34BajZs
-            OFm+y6nIe6zsf0vhn/yPVN4Yhuu/WhkvqouR2NhSF7ulXckuR/ef55GPpbRcpSOj
-            VUkwJL3wsHPozvmcks/TnZbqj0u7XBGjZ2VK8sF+gQKBgQDsJGMeeaua5pOITVHk
-            reHaxy4tLs1+98++L9SffBbsQcCu4OdgMBizCXuUw9bHlMx19B/B56cJst239li3
-            dHfC/mF4/8em5XOx97FyC0rF02qYCPXViTrTSovSEWHuM/ChmhaRlZdp5F4EBMp7
-            ELdf4OBCHGz47UCLQF75/FPtJwKBgQDnHn9HuFepY+yV1sNcPKj1GfciaseKzTk1
-            Iw5VVtqyS2p8vdXNUiJmaF0245S3phRBL6PDhdfd3SwMmNYvhTYsqBc6ZRHO4b9J
-            SjmHct63286NuEn0piYaa3MZ8sV/xI0a5leAdkzyqPTCcn0HlvDL0HTV34umdmfj
-            kqC4jsWukQKBgC48cavl5tPNkdV+TiqYYUCU/1WZdGMH4oU6mEch5NsdhLy5DJSo
-            1i04DhpyvfsWB3KQ+ibdVLdxbjg24+gHxetII42th0oGY0DVXskVrO5PFu/t0TSe
-            SgZU8kuPW71oLhV2NjULNTpmnIHs7jhqbX04arCHIE8dJSYe1HneDhDBAoGBALTk
-            4txgxYQYaNFykd/8voVwuETg7KOQM0mK0aor2+qXKpbOAqy8r54V63eNsxX20H2g
-            6v2bIbVOai7F5Ua2bguP2PZkqwaRHKYhiVuhpf6j9UxpRMFO1h3xodpacQiq74Jx
-            bWVnspxvb3tOHtw04O21j+ziFizJGlE9r7wkS0dxAoGAeq/Ecb+nJp/Ce4h5US1O
-            4rruiLLYMkcFGmhSMcQ+lVbGOn4eSpqrGWn888Db2oiu7mv+u0TK9ViXwHkfp4FP
-            Hnm0S8e25py1Lj+bk1tH0ku1I8qcAtihYBtSwPGj+66Qyr8KOlxZP2Scvcqu+zBc
-            cyhsrrlRc3Gky9L5gtdxdeo=
-            -----END PRIVATE KEY-----
-        '''),
-        False,
-    ),
-    (
-        textwrap.dedent('''\
-           -----BEGIN CERTIFICATE-----
-           ntnv5rxGqdYHEDPo
-           j8oo1HQv8vqhDcKUJOKH5j5cWO+W75CpAHuMfgxKJ9WdxPSNpKZoOKIMd2hwd4ng
-           2+ulgfvVKcE4PM4YSrtW4qoAoz/+gyfwSoIAQJ0VOuEwL+QFJ8Ud1aJaJRkLD39P
-           uLEje++rBbfIX9VPCRS/c3gYAOHu66LYI3toTomY8U3YYiQk8bC3Rp9uAjmgI3br
-           4DHLwRTEUbOL8CdNcGb1qvO8xBSRzjMIZM8QJHSyYNcM
-           -----END CERTIFICATE-----
-        '''),
-        textwrap.dedent('''\
-            -----BEGIN PRIVATE KEY-----
-            MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDVMPccUqq6jd8h
-            h0ybrwRkvK+pvOJze00IK7F6A8RRyCwDL2Yc0GpWR5ecY+jBiZ1n+TfKfaybdKR0
-            0hhFFuU74JTsUk298hI1GVBNvwbimgraQciWjg0wDjHAN7AFZL8Jb/Tn7/DZlmn+
-            TgqdPaFIeD4XnLX6zwrc4VemKYDDcdr5JyDVCt3ZtqTEbbtxQ4WvZbtCxlzlkyJu
-            xwdmGyCvjkQri55+FaejvnPCUzJSOK28jShBuZCIS3lR7HCcAS4cc05TTrWSZr+i
-            brLISVEz1XASc0pKz8QGMuz5Hk5uNRLl4JGmWZrSV9lqtFYP9hatpLi5mnhWpgYi
-            Q0IXvNUXAgMBAAECggEAdbgf+0e6dmC4gO8Q4jZ2GpoF9ZgTAulm08gsq89ArFf3
-            1ZpqrCZ5UUMe+IBCmfu/KxZ2NB3JHd3+oXMRa7UEx1dvZD7eJrBwVVmw+f0tdBrT
-            O0lv1ZKCvbJYzmbxj0jeI/vqI9heCggAZyf4vHK3iCi9QJSL9/4zZVwY5eus6j4G
-            RCMXW8ZqiKX3GLtCjPmZilYQHNDbsfAbqy75AsG81fgaKkYkJS29rte9R34BajZs
-            OFm+y6nIe6zsf0vhn/yPVN4Yhuu/WhkvqouR2NhSF7ulXckuR/ef55GPpbRcpSOj
-            VUkwJL3wsHPozvmcks/TnZbqj0u7XBGjZ2VK8sF+gQKBgQDsJGMeeaua5pOITVHk
-            reHaxy4tLs1+98++L9SffBbsQcCu4OdgMBizCXuUw9bHlMx19B/B56cJst239li3
-            dHfC/mF4/8em5XOx97FyC0rF02qYCPXViTrTSovSEWHuM/ChmhaRlZdp5F4EBMp7
-            ELdf4OBCHGz47UCLQF75/FPtJwKBgQDnHn9HuFepY+yV1sNcPKj1GfciaseKzTk1
-            Iw5VVtqyS2p8vdXNUiJmaF0245S3phRBL6PDhdfd3SwMmNYvhTYsqBc6ZRHO4b9J
-            SjmHct63286NuEn0piYaa3MZ8sV/xI0a5leAdkzyqPTCcn0HlvDL0HTV34umdmfj
-            kqC4jsWukQKBgC48cavl5tPNkdV+TiqYYUCU/1WZdGMH4oU6mEch5NsdhLy5DJSo
-            1i04DhpyvfsWB3KQ+ibdVLdxbjg24+gHxetII42th0oGY0DVXskVrO5PFu/t0TSe
-            SgZU8kuPW71oLhV2NjULNTpmnIHs7jhqbX04arCHIE8dJSYe1HneDhDBAoGBALTk
-            4txgxYQYaNFykd/8voVwuETg7KOQM0mK0aor2+qXKpbOAqy8r54V63eNsxX20H2g
-            6v2bIbVOai7F5Ua2bguP2PZkqwaRHKYhiVuhpf6j9UxpRMFO1h3xodpacQiq74Jx
-            bWVnspxvb3tOHtw04O21j+ziFizJGlE9r7wkS0dxAoGAeq/Ecb+nJp/Ce4h5US1O
-            4rruiLLYMkcFGmhSMcQ+lVbGOn4eSpqrGWn888Db2oiu7mv+u0TK9ViXwHkfp4FP
-            Hnm0S8e25py1Lj+bk1tH0ku1I8qcAtihYBtSwPGj+66Qyr8KOlxZP2Scvcqu+zBc
-            cyhsrrlRc3Gky9L5gtdxdeo=
-            -----END PRIVATE KEY-----
-        '''),
-        False,
-    )
-], ids=['valid_cert', 'mismatched_private_key', 'malformed_certificate'])
-def test_importing_certificate_validation(certificate, private_key, should_work):
-    cert_params = {'certificate': certificate, 'privatekey': private_key}
-    if should_work:
-        cert = None
-        try:
-            cert = call(
-                'certificate.create', {
-                    'name': 'test-cert',
-                    'create_type': 'CERTIFICATE_CREATE_IMPORTED',
-                    **cert_params,
-                }, job=True
-            )
-            assert cert['parsed'] is True, cert
-        finally:
-            if cert:
-                call('certificate.delete', cert['id'], job=True)
-    else:
-        with pytest.raises(ValidationErrors):
-            call(
-                'certificate.create', {
-                    'name': 'test-cert',
-                    'create_type': 'CERTIFICATE_CREATE_IMPORTED',
-                    **cert_params,
-                }, job=True
-            )
diff --git a/tests/api2/test_client_job.py b/tests/api2/test_client_job.py
deleted file mode 100644
index 04ad2df0b1525..0000000000000
--- a/tests/api2/test_client_job.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pprint
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import client, mock
-
-
-# FIXME: Sometimes an identical message for the `SUCCESS` state is sent (or received) twice. We were not
-# able to determine why, and since it does not break anything, we are not going to spend time investigating
-# it.
-# Likewise, the `RUNNING` message is occasionally not received; this also has no obvious explanation and is
-# not reproducible.
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_client_job_callback():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            import time
-            time.sleep(2)
-            return 42
-    """):
-        with client() as c:
-            results = []
-
-            c.call("test.test1", job=True, callback=lambda job: results.append(job.copy()))
-
-            # callback is called in a separate thread, allow it to settle
-            time.sleep(2)
-
-            assert len(results) == 2, pprint.pformat(results, indent=2)
-            assert results[0]['state'] == 'RUNNING'
-            assert results[1]['state'] == 'SUCCESS'
-            assert results[1]['result'] == 42
diff --git a/tests/api2/test_cloud_backup.py b/tests/api2/test_cloud_backup.py
deleted file mode 100644
index ef585eff2213d..0000000000000
--- a/tests/api2/test_cloud_backup.py
+++ /dev/null
@@ -1,382 +0,0 @@
-import json
-import os
-import re
-import time
-import types
-
-import boto3
-import pytest
-
-from truenas_api_client import ClientException
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.cloud_backup import task, run_task
-from middlewared.test.integration.assets.cloud_sync import credential
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils.call import call
-from middlewared.test.integration.utils.mock import mock
-from middlewared.test.integration.utils.ssh import ssh
-
-try:
-    from config import (
-        AWS_ACCESS_KEY_ID,
-        AWS_SECRET_ACCESS_KEY,
-        AWS_BUCKET,
-    )
-except ImportError:
-    pytestmark = pytest.mark.skip(reason="AWS credentials are missing in config.py")
-
-
-def clean():
-    s3 = boto3.Session(
-        aws_access_key_id=AWS_ACCESS_KEY_ID,
-        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
-    ).resource("s3")
-    bucket = s3.Bucket(AWS_BUCKET)
-    bucket.objects.filter(Prefix="cloud_backup/").delete()
-
-
-def parse_log(task_id):
-    log = ssh("cat " + call("cloud_backup.get_instance", task_id)["job"]["logs_path"])
-    return [json.loads(line) for line in log.strip().split("\n")]
-
-
-def validate_log(task_id, **kwargs):
-    log = parse_log(task_id)
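-    # parse_log() yields one dict per restic JSON log line; the second-to-last entry is treated as the summary record.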
-    log, summary = log[:-2], log[-2]
-
-    for message in log:
-        if message["message_type"] == "error":
-            pytest.fail(f'Received restic error {message}')
-
-    assert all(summary[k] == v for k, v in kwargs.items())
-
-
-@pytest.fixture(scope="module")
-def s3_credential():
-    with credential({
-        "provider": {
-            "type": "S3",
-            "access_key_id": AWS_ACCESS_KEY_ID,
-            "secret_access_key": AWS_SECRET_ACCESS_KEY,
-        },
-    }) as c:
-        yield c
-
-
-@pytest.fixture(scope="function")
-def cloud_backup_task(s3_credential, request):
-    clean()
-
-    with dataset("cloud_backup") as local_dataset:
-        with task({
-            "path": f"/mnt/{local_dataset}",
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "keep_last": 100,
-            **getattr(request, "param", {})
-        }) as t:
-            yield types.SimpleNamespace(
-                local_dataset=local_dataset,
-                task=t,
-            )
-
-
-def test_cloud_backup(cloud_backup_task):
-    task_ = cloud_backup_task.task
-    task_id_ = task_["id"]
-    local_dataset_ = cloud_backup_task.local_dataset
-
-    assert call("cloud_backup.list_snapshots", task_id_) == []
-
-    ssh(f"dd if=/dev/urandom of=/mnt/{local_dataset_}/blob1 bs=1M count=1")
-    run_task(task_)
-
-    validate_log(task_id_, files_new=1, files_changed=0, files_unmodified=0)
-
-    snapshots = call("cloud_backup.list_snapshots", task_id_)
-    first_snapshot = snapshots[0]
-    assert len(snapshots) == 1
-    assert (first_snapshot["time"] - call("system.info")["datetime"]).total_seconds() < 300
-    assert first_snapshot["paths"] == [f"/mnt/{local_dataset_}"]
-
-    ssh(f"mkdir /mnt/{local_dataset_}/dir1")
-    ssh(f"dd if=/dev/urandom of=/mnt/{local_dataset_}/dir1/blob2 bs=1M count=1")
-
-    run_task(task_)
-
-    validate_log(task_id_, files_new=1, files_changed=0, files_unmodified=1)
-
-    snapshots = call("cloud_backup.list_snapshots", task_id_)
-    assert len(snapshots) == 2
-
-    contents = call(
-        "cloud_backup.list_snapshot_directory",
-        task_id_,
-        snapshots[-1]["id"],
-        f"/mnt/{local_dataset_}",
-    )
-    assert len(contents) == 3
-    assert contents[0]["name"] == "cloud_backup"
-    assert contents[1]["name"] == "blob1"
-    assert contents[2]["name"] == "dir1"
-
-    call("cloud_backup.update", task_id_, {"keep_last": 2})
-
-    run_task(task_)
-
-    snapshots = call("cloud_backup.list_snapshots", task_id_)
-    assert all(snapshot["id"] != first_snapshot["id"] for snapshot in snapshots)
-
-    snapshot_to_delete_id = snapshots[0]["id"]
-    call("cloud_backup.delete_snapshot", task_id_, snapshot_to_delete_id, job=True)
-
-    snapshots = call("cloud_backup.list_snapshots", task_id_)
-    assert all(snapshot["id"] != snapshot_to_delete_id for snapshot in snapshots)
-
-
-@pytest.fixture(scope="module")
-def completed_cloud_backup_task(s3_credential):
-    clean()
-
-    with dataset("completed_cloud_backup") as local_dataset:
-        ssh(f"mkdir /mnt/{local_dataset}/dir1")
-        ssh(f"touch /mnt/{local_dataset}/dir1/file1")
-        ssh(f"mkdir /mnt/{local_dataset}/dir2")
-        ssh(f"touch /mnt/{local_dataset}/dir2/file2")
-        ssh(f"mkdir /mnt/{local_dataset}/dir3")
-        ssh(f"touch /mnt/{local_dataset}/dir3/file3")
-
-        with task({
-            "path": f"/mnt/{local_dataset}",
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "keep_last": 100,
-        }) as t:
-            run_task(t)
-
-            snapshot = call("cloud_backup.list_snapshots", t["id"])[0]
-
-            yield types.SimpleNamespace(
-                local_dataset=local_dataset,
-                task=t,
-                snapshot=snapshot,
-            )
-
-
-@pytest.mark.parametrize("options,result", [
-    ({}, ["dir1/file1", "dir2/file2", "dir3/file3"]),
-    ({"include": ["dir1", "dir2"]}, ["dir1/file1", "dir2/file2"]),
-    ({"exclude": ["dir2", "dir3"]}, ["dir1/file1"]),
-])
-def test_cloud_backup_restore(completed_cloud_backup_task, options, result):
-    with dataset("restore") as restore:
-        call(
-            "cloud_backup.restore",
-            completed_cloud_backup_task.task["id"],
-            completed_cloud_backup_task.snapshot["id"],
-            f"/mnt/{completed_cloud_backup_task.local_dataset}",
-            f"/mnt/{restore}",
-            options,
-            job=True,
-        )
-
-        assert sorted([
-            os.path.relpath(path, f"/mnt/{restore}")
-            for path in ssh(f"find /mnt/{restore} -type f").splitlines()
-        ]) == result
-
-
-@pytest.fixture(scope="module")
-def zvol():
-    with dataset("cloud_backup_zvol", {"type": "VOLUME", "volsize": 1024 * 1024}) as zvol:
-        path = f"/dev/zvol/{zvol}"
-        ssh(f"dd if=/dev/urandom of={path} bs=1M count=1")
-
-        yield path
-
-
-def test_zvol_cloud_backup(s3_credential, zvol):
-    clean()
-
-    with mock("cloud_backup.validate_zvol", return_value=None):
-        with task({
-            "path": zvol,
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "keep_last": 100,
-        }) as t:
-            run_task(t)
-
-
-def test_zvol_cloud_backup_create_time_validation(s3_credential, zvol):
-    clean()
-
-    with pytest.raises(ValidationErrors) as ve:
-        with task({
-            "path": zvol,
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "keep_last": 100,
-        }):
-            pass
-
-    assert "cloud_backup_create.path" in ve.value
-
-
-def test_zvol_cloud_backup_runtime_validation(s3_credential, zvol):
-    clean()
-
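-    # Enter the mock manually so task creation passes zvol validation, then exit it inside the task context so the run itself fails validation.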
-    m = mock("cloud_backup.validate_zvol", return_value=None)
-    m.__enter__()
-    exited = False
-    try:
-        with task({
-            "path": zvol,
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "keep_last": 100,
-        }) as t:
-            m.__exit__(None, None, None)
-            exited = True
-
-            with pytest.raises(ClientException):
-                run_task(t)
-    finally:
-        if not exited:
-            m.__exit__(None, None, None)
-
-
-def test_create_to_backend_with_a_different_password(cloud_backup_task):
-    with pytest.raises(ValidationErrors) as ve:
-        with task({
-            "path": cloud_backup_task.task["path"],
-            "credentials": cloud_backup_task.task["credentials"]["id"],
-            "attributes": cloud_backup_task.task["attributes"],
-            "password": "test2",
-            "keep_last": 100,
-        }):
-            pass
-
-    assert "cloud_backup_create.password" in ve.value
-
-
-def test_update_with_incorrect_password(cloud_backup_task):
-    with pytest.raises(ValidationErrors) as ve:
-        call("cloud_backup.update", cloud_backup_task.task["id"], {"password": "test2"})
-
-    assert "cloud_backup_update.password" in ve.value
-
-
-def test_sync_initializes_repo(cloud_backup_task):
-    clean()
-
-    call("cloud_backup.sync", cloud_backup_task.task["id"], job=True)
-
-
-def test_transfer_setting_choices():
-    assert call("cloud_backup.transfer_setting_choices") == ["DEFAULT", "PERFORMANCE", "FAST_STORAGE"]
-
-
-@pytest.mark.parametrize("cloud_backup_task, options", [
-    (
-        {"transfer_setting": "PERFORMANCE"},
-        "'--pack-size', '29'"
-    ),
-    (
-        {"transfer_setting": "FAST_STORAGE"},
-        "'--pack-size', '58', '--read-concurrency', '100'"
-    )
-], indirect=["cloud_backup_task"])
-def test_other_transfer_settings(cloud_backup_task, options):
-    run_task(cloud_backup_task.task)
-    result = ssh(f'grep "{options}" /var/log/middlewared.log')
-    assert options in result
-
-
-def test_snapshot(s3_credential):
-    with dataset("cloud_backup_snapshot") as ds:
-        ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
-        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1024")
-
-        with task({
-            "path": f"/mnt/{ds}/dir1/dir2",
-            "credentials": s3_credential["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "cloud_backup",
-            },
-            "password": "test",
-            "snapshot": True
-        }) as t:
-            pattern = rf"restic .+ /mnt/{ds}/.zfs/snapshot/cloud_backup-[0-9]+-[0-9]+/dir1/dir2"
-
-            job_id = call("cloud_backup.sync", t["id"], {"dry_run": True})
-
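-            # Poll the process list briefly to confirm restic reads from the ZFS snapshot path rather than the live dataset.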
-            end = time.time() + 5
-            while time.time() <= end:
-                ps_ax = ssh("ps ax | grep restic")
-                if re.search(pattern, ps_ax):
-                    break
-                time.sleep(0.1)
-            else:
-                pytest.fail(f"Couldn't validate snapshot backup.\n{ps_ax}")
-
-            call("core.job_wait", job_id, job=True)
-
-        time.sleep(1)
-        assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
-
-
-@pytest.mark.parametrize("cloud_backup_task, expected", [(
-    {"post_script": "#!/usr/bin/env python3\nprint('Test' * 2)"},
-    "[Post-script] TestTest"
-)], indirect=["cloud_backup_task"])
-def test_script_shebang(cloud_backup_task, expected):
-    run_task(cloud_backup_task.task)
-    job = call("core.get_jobs", [["method", "=", "cloud_backup.sync"]], {"order_by": ["-id"], "get": True})
-    assert job["logs_excerpt"].strip().split("\n")[-2] == expected
-
-
-@pytest.mark.parametrize("cloud_backup_task", [
-    {"pre_script": "touch /tmp/cloud_backup_test"},
-    {"post_script": "touch /tmp/cloud_backup_test"}
-], indirect=True)
-def test_scripts_ok(cloud_backup_task):
-    ssh("rm /tmp/cloud_backup_test", check=False)
-    run_task(cloud_backup_task.task)
-    ssh("cat /tmp/cloud_backup_test")
-
-
-@pytest.mark.parametrize("cloud_backup_task, error, expected", [(
-    {"pre_script": "echo Custom error\nexit 123"},
-    "[EFAULT] Pre-script failed with exit code 123",
-    "[Pre-script] Custom error"
-)], indirect=["cloud_backup_task"])
-def test_pre_script_failure(cloud_backup_task, error, expected):
-    with pytest.raises(ClientException) as ve:
-        run_task(cloud_backup_task.task)
-
-    assert ve.value.error == error
-
-    job = call("core.get_jobs", [["method", "=", "cloud_backup.sync"]], {"order_by": ["-id"], "get": True})
-    assert job["logs_excerpt"].strip() == expected
diff --git a/tests/api2/test_cloud_sync.py b/tests/api2/test_cloud_sync.py
deleted file mode 100644
index bde25807a0b89..0000000000000
--- a/tests/api2/test_cloud_sync.py
+++ /dev/null
@@ -1,239 +0,0 @@
-import re
-import time
-
-import pytest
-from middlewared.test.integration.assets.cloud_sync import (
-    credential, task, local_ftp_credential, local_ftp_task, run_task,
-)
-from middlewared.test.integration.assets.ftp import anonymous_ftp_server, ftp_server_with_user_account
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, pool, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from auto_config import ha
-
-
-def test_include():
-    with local_ftp_task({
-        "include": ["/office/**", "/work/**"],
-    }) as task:
-        ssh(f'mkdir {task["path"]}/office')
-        ssh(f'touch {task["path"]}/office/paper')
-        ssh(f'mkdir {task["path"]}/work')
-        ssh(f'touch {task["path"]}/work/code')
-        ssh(f'mkdir {task["path"]}/games')
-        ssh(f'touch {task["path"]}/games/minecraft')
-        ssh(f'touch {task["path"]}/fun')
-
-        run_task(task)
-
-        assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'office\nwork\n'
-
-
-def test_exclude_recycle_bin():
-    with local_ftp_task({
-        "exclude": ["$RECYCLE.BIN/"],
-    }) as task:
-        ssh(f'mkdir {task["path"]}/\'$RECYCLE.BIN\'')
-        ssh(f'touch {task["path"]}/\'$RECYCLE.BIN\'/garbage')
-        ssh(f'touch {task["path"]}/file')
-
-        run_task(task)
-
-        assert ssh(f'ls /mnt/{pool}/cloudsync_remote') == 'file\n'
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-@pytest.mark.parametrize("anonymous", [True, False])
-@pytest.mark.parametrize("defaultroot", [True, False])
-@pytest.mark.parametrize("has_leading_slash", [True, False])
-def test_ftp_subfolder(anonymous, defaultroot, has_leading_slash):
-    with dataset("cloudsync_local") as local_dataset:
-        config = {"defaultroot": defaultroot}
-        with (anonymous_ftp_server if anonymous else ftp_server_with_user_account)(config) as ftp:
-            remote_dataset = ftp.dataset
-            ssh(f"touch /mnt/{remote_dataset}/bad-file")
-            ssh(f"mkdir /mnt/{remote_dataset}/data")
-            ssh(f"touch /mnt/{remote_dataset}/data/another-bad-file")
-            ssh(f"mkdir /mnt/{remote_dataset}/data/child")
-            ssh(f"touch /mnt/{remote_dataset}/data/child/good-file")
-
-            with credential({
-                "name": "Test",
-                "provider": {
-                    "type": "FTP",
-                    "host": "localhost",
-                    "port": 21,
-                    "user": ftp.username,
-                    "pass": ftp.password,
-                },
-            }) as c:
-                folder = f"{'/' if has_leading_slash else ''}data/child"
-                if not anonymous and not defaultroot:
-                    # We have access to the FTP server root directory
-                    if has_leading_slash:
-                        # A path with a leading slash should be the complete path in this case
-                        folder = f"/mnt/{ftp.dataset}/data/child"
-
-                with task({
-                    "direction": "PULL",
-                    "transfer_mode": "MOVE",
-                    "path": f"/mnt/{local_dataset}",
-                    "credentials": c["id"],
-                    "attributes": {
-                        "folder": folder,
-                    },
-                }) as t:
-                    run_task(t)
-
-                    assert ssh(f'ls /mnt/{local_dataset}') == 'good-file\n'
-
-
-@pytest.mark.parametrize("has_zvol_sibling", [True, False])
-def test_snapshot(has_zvol_sibling):
-    with dataset("test_cloudsync_snapshot") as ds:
-        ssh(f"mkdir -p /mnt/{ds}/dir1/dir2")
-        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/dir1/dir2/blob bs=1M count=1")
-
-        if has_zvol_sibling:
-            ssh(f"zfs create -V 1gb {pool}/zvol")
-
-        try:
-            with local_ftp_task({
-                "path": f"/mnt/{ds}/dir1/dir2",
-                "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 200}],  # So it'll take 5 seconds
-                "snapshot": True,
-            }) as task:
-                job_id = call("cloudsync.sync", task["id"])
-
-                time.sleep(2.5)
-
-                ps_ax = ssh("ps ax | grep rclone")
-
-                call("core.job_wait", job_id, job=True)
-
-                assert re.search(rf"rclone .+ /mnt/{ds}/.zfs/snapshot/cloud_sync-[0-9]+-[0-9]+/dir1/dir2", ps_ax)
-
-            time.sleep(1)
-
-            assert call("zfs.snapshot.query", [["dataset", "=", ds]]) == []
-        finally:
-            if has_zvol_sibling:
-                ssh(f"zfs destroy -r {pool}/zvol")
-
-
-def test_sync_onetime():
-    with dataset("cloudsync_local") as local_dataset:
-        with local_ftp_credential() as c:
-            call("cloudsync.sync_onetime", {
-                "direction": "PUSH",
-                "transfer_mode": "COPY",
-                "path": f"/mnt/{local_dataset}",
-                "credentials": c["id"],
-                "attributes": {
-                    "folder": "",
-                },
-            }, job=True)
-
-
-def test_abort():
-    with dataset("test_cloudsync_abort") as ds:
-        ssh(f"dd if=/dev/urandom of=/mnt/{ds}/blob bs=1M count=1")
-
-        with local_ftp_task({
-            "path": f"/mnt/{ds}",
-            "bwlimit": [{"time": "00:00", "bandwidth": 1024 * 100}],  # So it'll take 10 seconds
-        }) as task:
-            job_id = call("cloudsync.sync", task["id"])
-
-            time.sleep(2.5)
-
-            call("core.job_abort", job_id)
-
-            for i in range(10):
-                time.sleep(1)
-                state = call("cloudsync.query", [["id", "=", task["id"]]], {"get": True})["job"]["state"]
-                if state == "RUNNING":
-                    continue
-                elif state == "ABORTED":
-                    break
-                else:
-                    assert False, f"Cloud sync task is {state}"
-            else:
-                assert False, "Cloud sync task was not aborted"
-
-            assert "rclone" not in ssh("ps ax")
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-@pytest.mark.parametrize("create_empty_src_dirs", [True, False])
-def test_create_empty_src_dirs(create_empty_src_dirs):
-    with dataset("cloudsync_local") as local_dataset:
-        ssh(f"mkdir /mnt/{local_dataset}/empty-dir")
-        ssh(f"mkdir /mnt/{local_dataset}/non-empty-dir")
-        ssh(f"touch /mnt/{local_dataset}/non-empty-dir/file")
-
-        with anonymous_ftp_server() as ftp:
-            with credential({
-                "name": "Test",
-                "provider": {
-                    "type": "FTP",
-                    "host": "localhost",
-                    "port": 21,
-                    "user": ftp.username,
-                    "pass": ftp.password,
-                },
-            }) as c:
-                with task({
-                    "direction": "PUSH",
-                    "transfer_mode": "SYNC",
-                    "path": f"/mnt/{local_dataset}",
-                    "credentials": c["id"],
-                    "attributes": {
-                        "folder": "",
-                    },
-                    "create_empty_src_dirs": create_empty_src_dirs,
-                }) as t:
-                    run_task(t)
-
-                    if create_empty_src_dirs:
-                        assert ssh(f'ls /mnt/{ftp.dataset}') == 'empty-dir\nnon-empty-dir\n'
-                    else:
-                        assert ssh(f'ls /mnt/{ftp.dataset}') == 'non-empty-dir\n'
-
-
-def test_state_persist():
-    with dataset("test_cloudsync_state_persist") as ds:
-        with local_ftp_task({
-            "path": f"/mnt/{ds}",
-        }) as task:
-            call("cloudsync.sync", task["id"], job=True)
-
-            row = call("datastore.query", "tasks.cloudsync", [["id", "=", task["id"]]], {"get": True})
-            assert row["job"]["state"] == "SUCCESS"
-
-
-if ha:
-    def test_state_failover():
-        assert call("failover.status") == "MASTER"
-
-        ha_ips = truenas_server.ha_ips()
-
-        with dataset("test_cloudsync_state_failover") as ds:
-            with local_ftp_task({"path": f"/mnt/{ds}"}) as task:
-                call("cloudsync.sync", task["id"], job=True)
-                time.sleep(5)  # Job sending is not synchronous, allow it to propagate
-
-                file1_path = call("cloudsync.get_instance", task["id"])["job"]["logs_path"]
-                file1_contents = ssh(f'cat {file1_path}', ip=ha_ips['active'])
-                assert file1_contents
-
-                file2_path = call("failover.call_remote", "cloudsync.get_instance", [task["id"]])["job"]["logs_path"]
-                file2_contents = ssh(f'cat {file2_path}', ip=ha_ips['standby'])
-                assert file2_contents
-
-                assert file1_contents == file2_contents
diff --git a/tests/api2/test_cloud_sync_config.py b/tests/api2/test_cloud_sync_config.py
deleted file mode 100644
index 1ed509f3450ce..0000000000000
--- a/tests/api2/test_cloud_sync_config.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import time
-
-from middlewared.test.integration.assets.cloud_sync import credential, task
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.mock_rclone import mock_rclone
-
-
-def test_rclone_config_writer_bool():
-    with dataset("test_cloud_sync_config") as ds:
-        with credential({
-            "name": "Google Cloud Storage",
-            "provider": {
-                "type": "GOOGLE_CLOUD_STORAGE",
-                "service_account_credentials": "{\"project_id\": 1}",
-            },
-        }) as c:
-            with task({
-                "direction": "PUSH",
-                "transfer_mode": "COPY",
-                "path": f"/mnt/{ds}",
-                "credentials": c["id"],
-                "attributes": {
-                    "bucket": "bucket",
-                    "folder": "",
-                    "bucket_policy_only": True,
-                },
-            }) as t:
-                with mock_rclone() as mr:
-                    call("cloudsync.sync", t["id"])
-
-                    time.sleep(2.5)
-
-                    assert mr.result["config"]["remote"]["bucket_policy_only"] == "true"
diff --git a/tests/api2/test_cloud_sync_credentials.py b/tests/api2/test_cloud_sync_credentials.py
deleted file mode 100644
index 196549a47bf8d..0000000000000
--- a/tests/api2/test_cloud_sync_credentials.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from middlewared.test.integration.assets.cloud_sync import local_ftp_credential_data
-from middlewared.test.integration.utils import call
-
-
-def test_verify_cloud_credential():
-    with local_ftp_credential_data() as data:
-        assert call("cloudsync.credentials.verify", data["provider"])["valid"]
-
-
-def test_verify_cloud_credential_fail():
-    with local_ftp_credential_data() as data:
-        data["provider"]["user"] = "root"
-        assert not call("cloudsync.credentials.verify", data["provider"])["valid"]
diff --git a/tests/api2/test_cloud_sync_crud.py b/tests/api2/test_cloud_sync_crud.py
deleted file mode 100644
index 2712b306ab902..0000000000000
--- a/tests/api2/test_cloud_sync_crud.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.cloud_sync import credential as _credential, task as _task
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-
-try:
-    from config import (
-        AWS_ACCESS_KEY_ID,
-        AWS_SECRET_ACCESS_KEY,
-        AWS_BUCKET
-    )
-except ImportError:
-    Reason = 'AWS credentials are missing in config.py'
-    pytestmark = pytest.mark.skip(reason=Reason)
-
-
-@pytest.fixture(scope='module')
-def credentials():
-    with _credential({
-        "provider": {
-            "type": "S3",
-            "access_key_id": AWS_ACCESS_KEY_ID,
-            "secret_access_key": AWS_SECRET_ACCESS_KEY,
-        }
-    }) as c:
-        yield c
-
-
-@pytest.fixture(scope='module')
-def task(credentials):
-    with dataset("cloudsync_local") as local_dataset:
-        with _task({
-            "direction": "PUSH",
-            "transfer_mode": "COPY",
-            "path": f"/mnt/{local_dataset}",
-            "credentials": credentials["id"],
-            "attributes": {
-                "bucket": AWS_BUCKET,
-                "folder": "",
-            },
-        }) as t:
-            yield t
-
-
-def test_update_cloud_credentials(credentials):
-    call("cloudsync.credentials.update", credentials["id"], {
-        "provider": {
-            "type": "S3",
-            "access_key_id": "garbage",
-            "secret_access_key": AWS_SECRET_ACCESS_KEY,
-        }
-    })
-
-    assert call("cloudsync.credentials.get_instance", credentials["id"])["provider"]["access_key_id"] == "garbage"
-
-    call("cloudsync.credentials.update", credentials["id"], {
-        "provider": {
-            "type": "S3",
-            "access_key_id": AWS_ACCESS_KEY_ID,
-            "secret_access_key": AWS_SECRET_ACCESS_KEY,
-        },
-    })
-
-
-def test_update_cloud_sync(task):
-    assert call("cloudsync.update", task["id"], {"direction": "PULL"})
-
-
-def test_run_cloud_sync(task):
-    call("cloudsync.sync", task["id"], job=True)
-    print(ssh(f"ls {task['path']}"))
-    assert ssh(f"cat {task['path']}/freenas-test.txt") == "freenas-test\n"
-
-
-def test_restore_cloud_sync(task):
-    restore_task = call("cloudsync.restore", task["id"], {
-        "transfer_mode": "COPY",
-        "path": task["path"],
-    })
-
-    call("cloudsync.delete", restore_task["id"])
-
-
-def test_delete_cloud_credentials_error(credentials, task):
-    with pytest.raises(CallError) as ve:
-        call("cloudsync.credentials.delete", credentials["id"])
-
-    assert "This credential is used by cloud sync task" in ve.value.errmsg
diff --git a/tests/api2/test_cloud_sync_custom_s3.py b/tests/api2/test_cloud_sync_custom_s3.py
deleted file mode 100644
index 20eb79e43384a..0000000000000
--- a/tests/api2/test_cloud_sync_custom_s3.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import time
-
-import pytest
-
-from middlewared.test.integration.assets.cloud_sync import credential, task
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.mock_rclone import mock_rclone
-
-
-@pytest.mark.parametrize("credential_attributes,result", [
-    (
-        {
-
-            "endpoint": "s3.fr-par.scw.cloud",
-            "region": "fr-par",
-            "skip_region": False,
-            "signatures_v2": False,
-        },
-        {"region": "fr-par"},
-    )
-])
-def test_custom_s3(credential_attributes, result):
-    with dataset("test_cloudsync_custom_s3") as ds:
-        with credential({
-            "name": "S3",
-            "provider": {
-                "type": "S3",
-                "access_key_id": "test",
-                "secret_access_key": "test",
-                **credential_attributes,
-            },
-        }) as c:
-            with task({
-                "direction": "PUSH",
-                "transfer_mode": "COPY",
-                "path": f"/mnt/{ds}",
-                "credentials": c["id"],
-                "attributes": {
-                    "bucket": "bucket",
-                    "folder": "",
-                },
-            }) as t:
-                with mock_rclone() as mr:
-                    call("cloudsync.sync", t["id"])
-
-                    time.sleep(2.5)
-
-                    assert mr.result["config"]["remote"]["region"] == "fr-par"
diff --git a/tests/api2/test_cloud_sync_script.py b/tests/api2/test_cloud_sync_script.py
deleted file mode 100644
index 5cd789e5b759e..0000000000000
--- a/tests/api2/test_cloud_sync_script.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import pytest
-
-from truenas_api_client import ClientException
-from middlewared.test.integration.assets.cloud_sync import local_ftp_task, run_task
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_pre_script_failure():
-    with local_ftp_task({
-        "pre_script": "echo Custom error\nexit 123",
-    }) as task:
-        with pytest.raises(ClientException) as ve:
-            run_task(task)
-
-        assert ve.value.error == "[EFAULT] Pre-script failed with exit code 123"
-
-        job = call("core.get_jobs", [["method", "=", "cloudsync.sync"]], {"order_by": ["-id"], "get": True})
-        assert job["logs_excerpt"] == "[Pre-script] Custom error\n"
-
-
-def test_pre_script_ok():
-    ssh("rm /tmp/cloud_sync_test", check=False)
-    with local_ftp_task({
-        "pre_script": "touch /tmp/cloud_sync_test",
-    }) as task:
-        run_task(task)
-
-        ssh("cat /tmp/cloud_sync_test")
-
-
-def test_post_script_not_running_after_failure():
-    ssh("touch /tmp/cloud_sync_test")
-    with local_ftp_task({
-        "post_script": "rm /tmp/cloud_sync_test",
-    }) as task:
-        call("service.stop", "ftp")
-
-        with pytest.raises(ClientException) as ve:
-            run_task(task)
-
-        assert "connection refused" in ve.value.error
-
-        ssh("cat /tmp/cloud_sync_test")
-
-
-def test_post_script_ok():
-    ssh("rm /tmp/cloud_sync_test", check=False)
-    with local_ftp_task({
-        "post_script": "touch /tmp/cloud_sync_test",
-    }) as task:
-        run_task(task)
-
-        ssh("cat /tmp/cloud_sync_test")
-
-
-def test_script_shebang():
-    with local_ftp_task({
-        "post_script": "#!/usr/bin/env python3\nprint('Test' * 2)",
-    }) as task:
-        run_task(task)
-
-        job = call("core.get_jobs", [["method", "=", "cloudsync.sync"]], {"order_by": ["-id"], "get": True})
-        assert job["logs_excerpt"].endswith("[Post-script] TestTest\n")
diff --git a/tests/api2/test_cloud_sync_storj.py b/tests/api2/test_cloud_sync_storj.py
deleted file mode 100644
index a7f40d5aadb55..0000000000000
--- a/tests/api2/test_cloud_sync_storj.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import pytest
-
-from config import (
-    STORJ_IX_AWS_ACCESS_KEY_ID,
-    STORJ_IX_AWS_SECRET_ACCESS_KEY,
-    STORJ_IX_BUCKET,
-)
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.cloud_sync import credential, task, run_task
-from middlewared.test.integration.assets.pool import dataset
-
-
-CREDENTIAL = {
-    "provider": {
-        "type": "STORJ_IX",
-        "access_key_id": STORJ_IX_AWS_ACCESS_KEY_ID,
-        "secret_access_key": STORJ_IX_AWS_SECRET_ACCESS_KEY,
-    }
-}
-TASK_ATTRIBUTES = {
-    "bucket": STORJ_IX_BUCKET,
-    "folder": "",
-}
-FILENAME = "a"
-
-
-def test_storj_verify():
-    result = call("cloudsync.credentials.verify", {
-        "type": "STORJ_IX",
-        "access_key_id": STORJ_IX_AWS_ACCESS_KEY_ID,
-        "secret_access_key": STORJ_IX_AWS_SECRET_ACCESS_KEY,
-    })
-
-    assert result["valid"], result
-
-
-@pytest.fixture(scope="module")
-def storj_credential():
-    with credential(CREDENTIAL) as c:
-        yield c
-
-
-def test_storj_list_buckets(storj_credential):
-    assert any(item["Name"] == STORJ_IX_BUCKET for item in call("cloudsync.list_buckets", storj_credential["id"]))
-
-
-@pytest.fixture(scope="module")
-def storj_sync(storj_credential):
-    """Reset the remote bucket to only contain a single empty file."""
-    with dataset("test_storj_sync") as ds:
-        ssh(f"touch /mnt/{ds}/{FILENAME}")
-        with task({
-            "direction": "PUSH",
-            "transfer_mode": "SYNC",
-            "path": f"/mnt/{ds}",
-            "credentials": storj_credential["id"],
-            "attributes": TASK_ATTRIBUTES,
-        }) as t:
-            run_task(t)
-
-
-def test_storj_list_directory(storj_credential, storj_sync):
-    result = call("cloudsync.list_directory", {
-        "credentials": storj_credential["id"],
-        "attributes": TASK_ATTRIBUTES,
-    })
-    assert len(result) == 1
-    assert result[0]["Name"] == FILENAME
-
-
-def test_storj_pull(storj_credential, storj_sync):
-    with dataset("test_storj_sync") as ds:
-        with task({
-            "direction": "PULL",
-            "transfer_mode": "COPY",
-            "path": f"/mnt/{ds}",
-            "credentials": storj_credential["id"],
-            "attributes": TASK_ATTRIBUTES,
-        }) as t:
-            run_task(t)
-
-            assert ssh(f"ls /mnt/{ds}") == FILENAME + "\n"
diff --git a/tests/api2/test_config_upload.py b/tests/api2/test_config_upload.py
deleted file mode 100644
index 09c7b7e3c6be3..0000000000000
--- a/tests/api2/test_config_upload.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import contextlib
-import io
-import json
-import sqlite3
-import tarfile
-import os
-
-import pytest
-
-from truenas_api_client import ClientException
-from middlewared.test.integration.utils import call, session, url
-
-
-@contextlib.contextmanager
-def db_ops(db_name):
-    try:
-        with contextlib.closing(sqlite3.connect(db_name)) as conn:
-            with conn:
-                conn.execute("CREATE TABLE alembic_version (version_num VARCHAR(32) NOT NULL);")
-                conn.execute("INSERT INTO alembic_version VALUES ('invalid')")
-            yield
-    finally:
-        os.unlink(db_name)
-
-
-@contextlib.contextmanager
-def tar_ops(file_to_add):
-    tar_name = "config.tar"
-    tfile = None
-    try:
-        with tarfile.open(tar_name, "w") as tfile:
-            tfile.add(file_to_add)
-        yield tfile.name
-    finally:
-        if tfile is not None:
-            os.unlink(tfile.name)
-
-
-def test_invalid_database_file():
-    db_name = "freenas-v1.db"
-    with db_ops(db_name):
-        with tar_ops(db_name) as tar_name:
-            with session() as s:
-                r = s.post(
-                    f"{url()}/_upload",
-                    files={
-                        "data": (None, io.StringIO(json.dumps({
-                            "method": "config.upload",
-                            "params": [],
-                        }))),
-                        "file": (None, open(tar_name, "rb")),
-                    },
-                )
-                r.raise_for_status()
-                job_id = r.json()["job_id"]
-                with pytest.raises(ClientException) as ve:
-                    call("core.job_wait", job_id, job=True)
-
-                assert 'Uploaded TrueNAS database file is not valid' in ve.value.error
-                assert "Can't locate revision identified by 'invalid'" in ve.value.error
diff --git a/tests/api2/test_core_bulk.py b/tests/api2/test_core_bulk.py
deleted file mode 100644
index c519d523a3522..0000000000000
--- a/tests/api2/test_core_bulk.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from unittest.mock import ANY
-
-import pytest
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, mock
-from middlewared.test.integration.utils.audit import expect_audit_log
-from truenas_api_client import ClientException
-
-
-def test_core_bulk_reports_job_id():
-    with mock("test.test1", """\
-        from middlewared.service import job, CallError
-
-        @job()
-        def mock(self, job, *args):
-            if args[0] == 0:
-                raise CallError("Error")
-            else:
-                return args[0]
-    """):
-        result = call("core.bulk", "test.test1", [[0], [10]], job=True)
-
-        assert result == [
-            {"job_id": ANY, "result": None, "error": "[EFAULT] Error"},
-            {"job_id": ANY, "result": 10, "error": None},
-        ]
-
-        job_0 = call("core.get_jobs", [["id", "=", result[0]["job_id"]]], {"get": True})
-        assert job_0["arguments"] == [0]
-        job_1 = call("core.get_jobs", [["id", "=", result[1]["job_id"]]], {"get": True})
-        assert job_1["arguments"] == [10]
-
-
-def test_authorized():
-    with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-        with mock("test.test1", """
-            from middlewared.service import pass_app
-
-            @pass_app()
-            async def mock(self, app):
-                return app.authenticated_credentials.dump()["username"].startswith("unprivileged")
-        """):
-            assert c.call("core.bulk", "test.test1", [[]], job=True) == [{"result": True, "error": None}]
-
-
-def test_authorized_audit():
-    with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-        with mock("test.test1", """
-            from middlewared.schema import Int
-            from middlewared.service import accepts
-
-            @accepts(Int("param"), audit="Mock", audit_extended=lambda param: str(param))
-            async def mock(self, param):
-                return
-        """):
-            with expect_audit_log([
-                {
-                    "event": "METHOD_CALL",
-                    "event_data": {
-                        "authenticated": True,
-                        "authorized": True,
-                        "method": "test.test1",
-                        "params": [42],
-                        "description": "Mock 42",
-                    },
-                    "success": True,
-                }
-            ]):
-                c.call("core.bulk", "test.test1", [[42]], job=True)
-
-
-def test_not_authorized():
-    with unprivileged_user_client(allowlist=[]) as c:
-        with pytest.raises(ClientException) as ve:
-            c.call("core.bulk", "test.test1", [[]], job=True)
-
-        assert ve.value.error == "[EPERM] Not authorized"
-
-
-def test_not_authorized_audit():
-    with unprivileged_user_client() as c:
-        with expect_audit_log([
-            {
-                "event": "METHOD_CALL",
-                "event_data": {
-                    "authenticated": True,
-                    "authorized": False,
-                    "method": "user.create",
-                    "params": [{"username": "sergey", "full_name": "Sergey"}],
-                    "description": "Create user sergey",
-                },
-                "success": False,
-            }
-        ]):
-            with pytest.raises(ClientException):
-                c.call("core.bulk", "user.create", [[{"username": "sergey", "full_name": "Sergey"}]], job=True)
diff --git a/tests/api2/test_core_download.py b/tests/api2/test_core_download.py
deleted file mode 100644
index 0db1df72d6933..0000000000000
--- a/tests/api2/test_core_download.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import requests
-
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils import call
-
-
-def test_get_download_for_config_dot_save():
-    # set up core download
-    job_id, url = call('core.download', 'config.save', [], 'freenas.db')
-
-    # download from URL
-    rv = requests.get(f'http://{truenas_server.ip}{url}')
-    assert rv.status_code == 200
-    assert len(rv.content) > 0
diff --git a/tests/api2/test_cronjob.py b/tests/api2/test_cronjob.py
deleted file mode 100644
index 683a829f41f2a..0000000000000
--- a/tests/api2/test_cronjob.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-
-TESTFILE = '/mnt/cronjob_testfile'
-
-
-def test_cron_job():
-    try:
-        id = call(
-            'cronjob.create',
-            {
-                'user': 'root',
-                'enabled': True,
-                'command': f'echo "yeah" > "{TESTFILE}"',
-                'schedule': {'minute': '*/1'}
-            }
-        )['id']
-        assert call('cronjob.query', [['id', '=', id]], {"get": True})['enabled'] is True
-    except Exception as e:
-        assert False, f'Unexpected failure: {str(e)}'
-
-    call('cronjob.run', id, job=True)
-    assert call('filesystem.statfs', TESTFILE)['blocksize']
-
-    results = ssh(f'rm "{TESTFILE}"', complete_response=True)
-    assert results['result'] is True, results['output']
-
-    call('cronjob.delete', id)
-    assert call('cronjob.query', [['id', '=', id]]) == []
diff --git a/tests/api2/test_crud.py b/tests/api2/test_crud.py
deleted file mode 100644
index 8d2dc75b0cc63..0000000000000
--- a/tests/api2/test_crud.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import contextlib
-import pytest
-
-from middlewared.test.integration.assets.privilege import privilege
-from middlewared.test.integration.utils import client
-
-
-@pytest.mark.parametrize('offset,limit', [
-    (0, 4),
-    (1, 4),
-    (2, 4),
-    (3, 4),
-    (2, 5),
-    (3, 5),
-])
-def test_query_filters(offset, limit):
-    with contextlib.ExitStack() as stack:
-        for i in range(5):
-            stack.enter_context(
-                privilege({
-                    'name': f'Test Privilege {i}',
-                    'web_shell': False
-                })
-            )
-        with client() as c:
-            query_results = c.call('privilege.query', [], {'select': ['id']})
-            expected_result = query_results[offset:offset + limit]
-            actual_result = c.call('privilege.query', [], {'offset': offset, 'limit': limit, 'select': ['id']})
-            assert actual_result == expected_result
diff --git a/tests/api2/test_crud_events.py b/tests/api2/test_crud_events.py
deleted file mode 100644
index a8f0868c562bf..0000000000000
--- a/tests/api2/test_crud_events.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import contextlib
-import threading
-import typing
-
-from middlewared.test.integration.assets.crypto import get_cert_params, root_certificate_authority
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import client
-
-
-def event_thread(event_endpoint: str, context: dict):
-    with client(py_exceptions=False) as c:
-        def cb(mtype, **message):
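-            # Ignore anything that is not an added/changed/removed event for the
-            # subscribed collection; record the first matching event and shut down.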
-            if len(message) != 3 or not all(
-                k in message for k in ('id', 'msg', 'collection')
-            ) or message['collection'] != event_endpoint or message['msg'] not in (
-                'added', 'changed', 'removed'
-            ):
-                return
-
-            if context['result'] is None:
-                context['result'] = message
-
-            context['received_result'].set()
-            context['shutdown_thread'].set()
-
-        c.subscribe(event_endpoint, cb)
-        context['subscribed'].set()
-        context['shutdown_thread'].wait(context['timeout'])
-
-
-@contextlib.contextmanager
-def wait_for_event(event_endpoint: str, timeout=60):
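-    # Start a background subscriber for the given endpoint, yield its shared
-    # context to the caller, and require that a matching event arrived before
-    # tearing the thread down.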
-    context = {
-        'subscribed': threading.Event(),
-        'result': None,
-        'received_result': threading.Event(),
-        'shutdown_thread': threading.Event(),
-        'timeout': timeout,
-    }
-    thread = threading.Thread(target=event_thread, args=(event_endpoint, context), daemon=True)
-    thread.start()
-    if not context['subscribed'].wait(30):
-        raise Exception('Timed out waiting for client to subscribe')
-
-    try:
-        yield context
-        if not context['received_result'].wait(timeout):
-            raise Exception('Event not received')
-    finally:
-        context['shutdown_thread'].set()
-        thread.join(timeout=5)
-
-
-def assert_result(context: dict, event_endpoint: str, oid: typing.Union[int, str], event_type: str) -> None:
-    assert context['result'] == {
-        'msg': event_type,
-        'collection': event_endpoint,
-        'id': oid,
-    }
-
-
-def test_event_create_on_non_job_method():
-    with wait_for_event('certificateauthority.query') as context:
-        with root_certificate_authority('root_ca_create_event_test') as root_ca:
-            assert root_ca['CA_type_internal'] is True, root_ca
-
-    assert_result(context, 'certificateauthority.query', root_ca['id'], 'added')
-
-
-def test_event_create_on_job_method():
-    with root_certificate_authority('root_ca_create_event_test') as root_ca:
-        with wait_for_event('certificate.query') as context:
-            cert = call('certificate.create', {
-                'name': 'cert_test',
-                'signedby': root_ca['id'],
-                'create_type': 'CERTIFICATE_CREATE_INTERNAL',
-                **get_cert_params(),
-            }, job=True)
-            try:
-                assert cert['cert_type_internal'] is True, cert
-            finally:
-                call('certificate.delete', cert['id'], job=True)
-
-        assert_result(context, 'certificate.query', cert['id'], 'added')
-
-
-def test_event_update_on_non_job_method():
-    with root_certificate_authority('root_ca_update_event_test') as root_ca:
-        assert root_ca['CA_type_internal'] is True, root_ca
-
-        with wait_for_event('certificateauthority.query') as context:
-            call('certificateauthority.update', root_ca['id'], {})
-
-        assert_result(context, 'certificateauthority.query', root_ca['id'], 'changed')
-
-
-def test_event_update_on_job_method():
-    with wait_for_event('tunable.query'):
-        tunable = call('tunable.create', {
-            'type': 'SYSCTL',
-            'var': 'kernel.watchdog',
-            'value': '1',
-        }, job=True)
-        try:
-            with wait_for_event('tunable.query') as context:
-                call('tunable.update', tunable['id'], {'value': '0'}, job=True)
-
-            assert_result(context, 'tunable.query', tunable['id'], 'changed')
-        finally:
-            call('tunable.delete', tunable['id'], job=True)
-
-
-def test_event_delete_on_non_job_method():
-    root_ca = call('certificateauthority.create', {
-        **get_cert_params(),
-        'name': 'test_root_ca_delete_event',
-        'create_type': 'CA_CREATE_INTERNAL',
-    })
-    assert root_ca['CA_type_internal'] is True, root_ca
-
-    with wait_for_event('certificateauthority.query') as context:
-        call('certificateauthority.delete', root_ca['id'])
-
-    assert_result(context, 'certificateauthority.query', root_ca['id'], 'removed')
-
-
-def test_event_delete_on_job_method():
-    with wait_for_event('tunable.query'):
-        tunable = call('tunable.create', {
-            'type': 'SYSCTL',
-            'var': 'kernel.watchdog',
-            'value': '1',
-        }, job=True)
-
-    with wait_for_event('tunable.query') as context:
-        call('tunable.delete', tunable['id'], job=True)
-
-    assert_result(context, 'tunable.query', tunable['id'], 'removed')
diff --git a/tests/api2/test_dataset_encryption_keys_in_replication.py b/tests/api2/test_dataset_encryption_keys_in_replication.py
deleted file mode 100644
index cbeb12bb732ac..0000000000000
--- a/tests/api2/test_dataset_encryption_keys_in_replication.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.replication import replication_task
-from middlewared.test.integration.utils import call
-
-
-BASE_REPLICATION = {
-    'direction': 'PUSH',
-    'transport': 'LOCAL',
-    'source_datasets': [],
-    'target_dataset': None,
-    'recursive': False,
-    'auto': False,
-    'retention_policy': 'NONE',
-}
-
-
-def encryption_props():
-    return {
-        'encryption_options': {'generate_key': True},
-        'encryption': True,
-        'inherit_encryption': False
-    }
-
-
-def make_assertions(source_datasets, task_id, target_dataset, unlocked_datasets):
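-    # Snapshot every source dataset, run the replication task, export the
-    # replicated encryption keys and verify they unlock exactly the expected
-    # datasets on the target side.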
-    for source_ds in source_datasets:
-        call('zfs.snapshot.create', {'dataset': source_ds, 'name': 'snaptest-1', 'recursive': True})
-
-    call('replication.run', task_id, job=True)
-    keys = call('pool.dataset.export_keys_for_replication_internal', task_id)
-    unlocked_info = call(
-        'pool.dataset.unlock', target_dataset.split('/', 1)[0], {
-            'datasets': [{'name': name, 'key': key} for name, key in keys.items()],
-            'recursive': True,
-        }, job=True
-    )
-    assert set(unlocked_info['unlocked']) == set(unlocked_datasets), unlocked_info
-
-
-def test_single_source_replication():
-    with dataset('source_test', encryption_props()) as src:
-        with dataset('parent_destination', encryption_props()) as parent_ds:
-            with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                with replication_task({
-                    **BASE_REPLICATION,
-                    'name': 'encryption_replication_test',
-                    'source_datasets': [src],
-                    'target_dataset': dst,
-                    'name_regex': '.+',
-                    'auto': False,
-                }) as task:
-                    make_assertions([src], task['id'], dst, [dst])
-
-
-def test_single_source_recursive_replication():
-    with dataset('source_test', encryption_props()) as src:
-        with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src:
-            with dataset('parent_destination', encryption_props()) as parent_ds:
-                with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                    with replication_task({
-                        **BASE_REPLICATION,
-                        'name': 'encryption_replication_test',
-                        'source_datasets': [src],
-                        'target_dataset': dst,
-                        'name_regex': '.+',
-                        'auto': False,
-                        'recursive': True,
-                    }) as task:
-                        make_assertions([src], task['id'], dst, [dst, f'{dst}/{child_src.rsplit("/", 1)[-1]}'])
-
-
-def test_single_source_child_encrypted_replication():
-    with dataset('source_test', encryption_props()) as src:
-        with dataset(f'{src.rsplit("/", 1)[-1]}/child_source_test', encryption_props()) as child_src:
-            with dataset('parent_destination', encryption_props()) as parent_ds:
-                with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                    with replication_task({
-                        **BASE_REPLICATION,
-                        'name': 'encryption_replication_test',
-                        'source_datasets': [child_src],
-                        'target_dataset': dst,
-                        'name_regex': '.+',
-                        'auto': False,
-                        'recursive': True,
-                    }) as task:
-                        make_assertions([child_src], task['id'], dst, [dst])
-
-
-def test_multiple_source_replication():
-    with dataset('source_test1', encryption_props()) as src1:
-        with dataset('source_test2', encryption_props()) as src2:
-            with dataset('parent_destination', encryption_props()) as parent_ds:
-                with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                    with replication_task({
-                        **BASE_REPLICATION,
-                        'name': 'encryption_replication_test',
-                        'source_datasets': [src1, src2],
-                        'target_dataset': dst,
-                        'name_regex': '.+',
-                        'auto': False,
-                    }) as task:
-                        make_assertions(
-                            [src1, src2], task['id'], dst, [f'{dst}/{k.rsplit("/", 1)[-1]}' for k in [src1, src2]]
-                        )
-
-
-def test_multiple_source_recursive_replication():
-    with dataset('source_test1', encryption_props()) as src1:
-        with dataset(f'{src1.rsplit("/", 1)[-1]}/child_source_test1', encryption_props()) as child_src1:
-            with dataset('source_test2', encryption_props()) as src2:
-                with dataset(f'{src2.rsplit("/", 1)[-1]}/child_source_test2', encryption_props()) as child_src2:
-                    with dataset('parent_destination', encryption_props()) as parent_ds:
-                        with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                            with replication_task({
-                                **BASE_REPLICATION,
-                                'name': 'encryption_replication_test',
-                                'source_datasets': [src1, src2],
-                                'target_dataset': dst,
-                                'name_regex': '.+',
-                                'auto': False,
-                                'recursive': True,
-                            }) as task:
-                                make_assertions(
-                                    [src1, src2], task['id'], dst, [
-                                        f'{dst}/{"/".join(k.rsplit("/")[-abs(n):])}' for k, n in [
-                                            (src1, 1), (src2, 1), (child_src1, 2), (child_src2, 2),
-                                        ]
-                                    ]
-                                )
-
-
-@pytest.mark.parametrize('keys_available_for_download', [False, True])
-def test_replication_task_reports_keys_available_for_download(keys_available_for_download):
-    with dataset('source_test', encryption_props() if keys_available_for_download else {}) as src:
-        with dataset('parent_destination', encryption_props() if keys_available_for_download else {}) as parent_ds:
-            with dataset(f'{parent_ds.rsplit("/", 1)[-1]}/destination_test') as dst:
-                with replication_task({
-                    **BASE_REPLICATION,
-                    'name': 'encryption_replication_test',
-                    'source_datasets': [src],
-                    'target_dataset': dst,
-                    'name_regex': '.+',
-                    'auto': False,
-                }) as task:
-                    task = call(
-                        'replication.get_instance', task['id'], {'extra': {'check_dataset_encryption_keys': True}}
-                    )
-                    assert task['has_encrypted_dataset_keys'] is keys_available_for_download, task
diff --git a/tests/api2/test_dataset_mount.py b/tests/api2/test_dataset_mount.py
deleted file mode 100644
index 750e0b2416792..0000000000000
--- a/tests/api2/test_dataset_mount.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-def test_dataset_mount_on_readonly_dataset():
-    src_parent_dataset_name = 'parent_src'
-    with dataset(src_parent_dataset_name) as parent_src:
-        with dataset(f'{src_parent_dataset_name}/child1', {'readonly': 'ON'}) as child1_ds:
-            with dataset(f'{src_parent_dataset_name}/child2', {'readonly': 'ON'}) as child2_ds:
-                call('zfs.dataset.create', {'name': f'{child1_ds}/failed'})
-                call('zfs.dataset.umount', parent_src, {'force': True})
-                call('zfs.dataset.mount', parent_src, {'recursive': True})
-                for source_dataset, mounted in (
-                    (parent_src, 'yes'),
-                    (child1_ds, 'yes'),
-                    (f'{child1_ds}/failed', 'no'),
-                    (child2_ds, 'yes'),
-                ):
-                    assert call('zfs.dataset.get_instance', source_dataset)['properties']['mounted']['value'] == mounted
diff --git a/tests/api2/test_dataset_unlock_validation.py b/tests/api2/test_dataset_unlock_validation.py
deleted file mode 100644
index 9b76d0ba33cf7..0000000000000
--- a/tests/api2/test_dataset_unlock_validation.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import os
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from truenas_api_client import ValidationErrors
-
-
-PASSPHRASE = '12345678'
-
-
-def encryption_props():
-    return {
-        'encryption_options': {'generate_key': False, 'passphrase': PASSPHRASE},
-        'encryption': True,
-        'inherit_encryption': False
-    }
-
-
-@pytest.mark.parametrize(
-    'nested_dir,lock_dataset', [('test_dir', True), ('parent/child', True), ('test_dir', False)]
-)
-def test_encrypted_dataset_unlock_mount_validation(nested_dir, lock_dataset):
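-    # When lock_dataset is set, files created under the locked dataset's
-    # mountpoint should make pool.dataset.unlock fail unless "force" is passed.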
-    with dataset('test_dataset', encryption_props()) as encrypted_ds:
-        mount_point = os.path.join('/mnt', encrypted_ds)
-
-        if lock_dataset:
-            call('pool.dataset.lock', encrypted_ds, job=True)
-            call('filesystem.set_zfs_attributes', {
-                'path': mount_point,
-                'zfs_file_attributes': {'immutable': False}
-            })
-
-        ssh(f'mkdir -p {os.path.join(mount_point, nested_dir)}')
-
-        if lock_dataset:
-            with pytest.raises(ValidationErrors) as ve:
-                call(
-                    'pool.dataset.unlock', encrypted_ds.split('/')[0],
-                    {'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True
-                )
-
-            assert ve.value.errors[0].attribute == 'unlock_options.datasets.0.force'
-            assert ve.value.errors[0].errmsg == f'\'{mount_point}\' directory is not empty (please provide' \
-                                                ' "force" flag to override this error and file/directory will be' \
-                                                ' renamed once the dataset is unlocked)'
-        else:
-            call(
-                'pool.dataset.unlock', encrypted_ds.split('/')[0],
-                {'datasets': [{'passphrase': PASSPHRASE, 'name': encrypted_ds}], 'recursive': True}, job=True
-            )
-
-    ssh(f'rm -rf {mount_point}')
diff --git a/tests/api2/test_dedup_pool_table_quota.py b/tests/api2/test_dedup_pool_table_quota.py
deleted file mode 100644
index 81ce2d7f33a68..0000000000000
--- a/tests/api2/test_dedup_pool_table_quota.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import pytest
-
-from truenas_api_client.exc import ValidationErrors
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-
-
-def dedup_pool_payload(dedup_table_quota: str | None, dedup_table_quota_value: int | None) -> dict:
-    unused_disks = call('disk.get_unused')
-    if len(unused_disks) < 2:
-        pytest.skip('Insufficient number of disks to perform this test')
-
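-    # One single-disk data vdev plus one single-disk dedup vdev, with the
-    # deduplication table quota fields under test.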
-    return {
-        'deduplication': 'ON',
-        'topology': {
-            'data': [{
-                'type': 'STRIPE',
-                'disks': [unused_disks[0]['name']]
-            }],
-            'dedup': [{
-                'type': 'STRIPE',
-                'disks': [unused_disks[1]['name']]
-            }],
-        },
-        'dedup_table_quota': dedup_table_quota,
-        'dedup_table_quota_value': dedup_table_quota_value,
-        'allow_duplicate_serials': True,
-    }
-
-
-@pytest.fixture(scope='module')
-def dedup_pool():
-    with another_pool(dedup_pool_payload('CUSTOM', 2048)) as pool:
-        yield pool
-
-
-@pytest.mark.parametrize(
-    'dedup_table_quota,dedup_table_quota_value,error_msg,error_attr', [
-        (
-            None,
-            1024,
-            'You must set Deduplication Table Quota to CUSTOM to specify a value.',
-            'pool_create.dedup_table_quota'
-        ),
-        (
-            'AUTO',
-            1024,
-            'You must set Deduplication Table Quota to CUSTOM to specify a value.',
-            'pool_create.dedup_table_quota'
-        ),
-        (
-            'CUSTOM',
-            None,
-            'This field is required when Deduplication Table Quota is set to CUSTOM.',
-            'pool_create.dedup_table_quota_value'
-        ),
-    ]
-)
-def test_dedup_table_quota_create_validation(dedup_table_quota, dedup_table_quota_value, error_msg, error_attr):
-    with pytest.raises(ValidationErrors) as ve:
-        with another_pool(dedup_pool_payload(dedup_table_quota, dedup_table_quota_value)):
-            pass
-
-    assert ve.value.errors[0].attribute == error_attr
-    assert ve.value.errors[0].errmsg == error_msg
-
-
-def test_dedup_table_quota_value_on_create(dedup_pool):
-    assert call('pool.get_instance', dedup_pool['id'])['dedup_table_quota'] == '2048'
-
-
-@pytest.mark.parametrize(
-    'dedup_table_quota,dedup_table_quota_value,expected_value,error_msg,error_attr', [
-        (None, None, '0', '', ''),
-        (
-            None,
-            1024,
-            '',
-            'You must set Deduplication Table Quota to CUSTOM to specify a value.',
-            'pool_update.dedup_table_quota'
-        ),
-        ('AUTO', None, 'auto', '', ''),
-        (
-            'AUTO',
-            1024,
-            '',
-            'You must set Deduplication Table Quota to CUSTOM to specify a value.',
-            'pool_update.dedup_table_quota'
-        ),
-        ('CUSTOM', 1024, '1024', '', ''),
-        (
-            'CUSTOM',
-            None,
-            '',
-            'This field is required when Deduplication Table Quota is set to CUSTOM.',
-            'pool_update.dedup_table_quota_value'
-        ),
-    ]
-)
-def test_dedup_table_quota_update(
-    dedup_pool, dedup_table_quota, dedup_table_quota_value, expected_value, error_msg, error_attr
-):
-    if error_msg:
-        with pytest.raises(ValidationErrors) as ve:
-            call(
-                'pool.update', dedup_pool['id'], {
-                    'dedup_table_quota': dedup_table_quota,
-                    'dedup_table_quota_value': dedup_table_quota_value,
-                    'allow_duplicate_serials': True,
-                }, job=True)
-        assert ve.value.errors[0].attribute == error_attr
-        assert ve.value.errors[0].errmsg == error_msg
-    else:
-        call(
-            'pool.update', dedup_pool['id'], {
-                'dedup_table_quota': dedup_table_quota,
-                'dedup_table_quota_value': dedup_table_quota_value,
-                'allow_duplicate_serials': True
-            }, job=True)
-        assert call('pool.get_instance', dedup_pool['id'])['dedup_table_quota'] == expected_value
diff --git a/tests/api2/test_device_get_disk_names.py b/tests/api2/test_device_get_disk_names.py
deleted file mode 100644
index 62437a8089076..0000000000000
--- a/tests/api2/test_device_get_disk_names.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_device_get_disk_names():
-    assert set(call('device.get_disks', False, True)) == set(call('device.get_disk_names'))
diff --git a/tests/api2/test_device_get_disks_size.py b/tests/api2/test_device_get_disks_size.py
deleted file mode 100644
index 47df48d8f581e..0000000000000
--- a/tests/api2/test_device_get_disks_size.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_device_get_disks_size():
-    boot_disk = call('boot.get_disks')[0]
-    fdisk_size = int(ssh(f'fdisk -s /dev/{boot_disk}').strip()) * 1024
-    assert call('device.get_disks')[boot_disk]['size'] == fdisk_size
diff --git a/tests/api2/test_disk_format.py b/tests/api2/test_disk_format.py
deleted file mode 100644
index a3f367102b8e0..0000000000000
--- a/tests/api2/test_disk_format.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import json
-import time
-
-from middlewared.test.integration.utils import call, ssh
-
-"""
-We use 'parted' to partition disks.
-Verification is based on 'parted' documentation (https://people.redhat.com/msnitzer/docs/io-limits.txt):
-    The heuristic parted uses is:
-    1)  Always use the reported 'alignment_offset' as the offset for the
-        start of the first primary partition.
-    2a) If 'optimal_io_size' is defined (not 0) align all partitions on an
-        'optimal_io_size' boundary.
-    2b) If 'optimal_io_size' is undefined (0) and 'alignment_offset' is 0
-        and 'minimum_io_size' is a power of 2: use a 1MB default alignment.
-        - as you can see this is the catch all for "legacy" devices which
-          don't appear to provide "I/O hints"; so in the default case all
-          partitions will align on a 1MB boundary.
-        - NOTE: we can't distinguish between a "legacy" device and modern
-          device that provides "I/O hints" with alignment_offset=0 and
-          optimal_io_size=0.  Such a device might be a single SAS 4K device.
-          So worst case we lose < 1MB of space at the start of the disk.
-"""
-# Some 'constants'
-MBR_SECTOR_GAP = 34
-ONE_MB = 1048576
-DATA_TYPE_UUID = "6a898cc3-1dd2-11b2-99a6-080020736631"
-
-
-def get_parted_info(disk_path):
-    # By the time this is called, the disk has been formatted
-    # but the kernel might not have been made fully aware of the changes
-    # so let's retry a bit before failing
-    for i in range(10):
-        pbytes = json.loads(ssh(f'parted {disk_path} unit b p --json'))['disk']
-        if pbytes.get('partitions') is None:
-            time.sleep(1)
-        else:
-            break
-    else:
-        assert False, f'parted tool failed to find partitions (in bytes) on {disk_path!r} ({pbytes!r})'
-
-    for i in range(10):
-        psectors = json.loads(ssh(f'parted {disk_path} unit s p --json'))['disk']
-        if psectors.get('partitions') is None:
-            time.sleep(1)
-        else:
-            break
-    else:
-        assert False, f'parted tool failed to find partitions (in sectors) on {disk_path!r} ({psectors!r})'
-
-    return pbytes, psectors
-
-
-def test_disk_format_and_wipe():
-    """Generate a single data partition"""
-    # get an unused disk and format it
-    unused = call('disk.get_unused')
-    assert unused, 'Need at least 1 unused disk'
-    call('disk.format', unused[0]['name'])
-    partitions = call('disk.list_partitions', unused[0]['name'])
-    assert partitions, partitions
-
-    # The first and only partition should be data
-    assert len(partitions) == 1, partitions
-    partition = partitions[0]
-    assert partition['partition_type'] == DATA_TYPE_UUID
-
-    # we used libparted to format a drive so let's
-    # validate our API matches parted output (NOTE:
-    # we check both bytes and sectors)
-    parted_bytes, parted_sectors = get_parted_info(f'/dev/{unused[0]["name"]}')
-
-    # sanity check (make sure parted shows same number of partitions)
-    assert len(parted_bytes['partitions']) == len(partitions), parted_bytes['partitions']
-    assert len(parted_sectors['partitions']) == len(partitions), parted_sectors['partitions']
-
-    # validate our API shows proper start/end sizes in bytes
-    pbyte = parted_bytes['partitions'][0]
-    assert int(pbyte['size'].split('B')[0]) == partition['size']
-    assert int(pbyte['start'].split('B')[0]) == partition['start']
-    assert int(pbyte['end'].split('B')[0]) == partition['end']
-
-    # validate our API shows proper start/end sizes in sectors
-    psect = parted_sectors['partitions'][0]
-    assert int(psect['start'].split('s')[0]) == partition['start_sector']
-    assert int(psect['end'].split('s')[0]) == partition['end_sector']
-
-    # verify that wiping the disk removes the partition labels
-    call('disk.wipe', partition['disk'], 'QUICK', job=True)
-    # confirm the partitions are gone
-    new_parts = call('disk.list_partitions', partition['disk'])
-    assert len(new_parts) == 0, new_parts
-
-    # sanity check, make sure parted doesn't see partitions either
-    pbytes = json.loads(ssh(f'parted /dev/{unused[0]["name"]} unit b p --json'))['disk']
-    assert pbytes.get('partitions') is None, repr(pbytes)
diff --git a/tests/api2/test_disk_get_dev_size.py b/tests/api2/test_disk_get_dev_size.py
deleted file mode 100644
index 086981d849687..0000000000000
--- a/tests/api2/test_disk_get_dev_size.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import json
-
-import pytest
-
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.fixture(scope="session")
-def blockdevices():
-    return {i['name']: i for i in json.loads(ssh('lsblk -bJ -o NAME,SIZE'))['blockdevices']}
-
-
-def test_get_dev_size_for_all_disks(blockdevices):
-    for disk, disk_info in blockdevices.items():
-        assert disk_info['size'] == call('disk.get_dev_size', disk)
diff --git a/tests/api2/test_disk_stats.py b/tests/api2/test_disk_stats.py
deleted file mode 100644
index e117d83e171c8..0000000000000
--- a/tests/api2/test_disk_stats.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def get_test_file_path(pool_name: str) -> str:
-    return os.path.join('/mnt', pool_name, 'test_file')
-
-
-@pytest.fixture(scope='module')
-def disk_pool():
-    with another_pool() as pool:
-        call('pool.dataset.update', pool['name'], {'sync': 'ALWAYS'})
-        pool_disks = call('disk.query', [['pool', '=', pool['name']]], {'extra': {'pools': True}})
-        assert len(pool_disks) == 1, f'Expected 1 disk in pool {pool["name"]}, got {len(pool_disks)}'
-        yield pool['name'], pool_disks[0]
-
-
-def test_disk_write_stats(disk_pool):
-    pool_name, pool_disk = disk_pool
-    disk_identifier = pool_disk['identifier']
-
-    disk_stats_before_write = call('netdata.get_disk_stats')[disk_identifier]
-    test_file_path = get_test_file_path(pool_name)
-
-    # Amount of data to write
-    num_of_mb = 100
-    data_size = num_of_mb * 1024 * 1024  # 100 MB
-
-    ssh(f'dd if=/dev/urandom of={test_file_path} bs=1M count={num_of_mb} oflag=sync')
-
-    disk_stats_after_write = call('netdata.get_disk_stats')[disk_identifier]
-
-    expected_write_in_kb = data_size / 1024
-    actual_writes = disk_stats_after_write['writes'] - disk_stats_before_write['writes']
-    assert actual_writes == pytest.approx(expected_write_in_kb, rel=0.1)
diff --git a/tests/api2/test_disk_temperature.py b/tests/api2/test_disk_temperature.py
deleted file mode 100644
index ded544bccffe6..0000000000000
--- a/tests/api2/test_disk_temperature.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import time
-from unittest.mock import ANY
-
-import pytest
-
-from middlewared.test.integration.utils import call, mock
-
-
-@pytest.fixture(autouse=True, scope="function")
-def reset_temperature_cache():
-    call("disk.reset_temperature_cache")
-
-
-def test_disk_temperature():
-    with mock("disk.temperature_uncached", return_value=50):
-        assert call("disk.temperature", "sda") == 50
-
-
-def test_disk_temperature_cache():
-    with mock("disk.temperature_uncached", return_value=50):
-        call("disk.temperature", "sda")
-
-    with mock("disk.temperature_uncached", exception=True):
-        assert call("disk.temperature", "sda", {"cache": 300}) == 50
-
-
-def test_disk_temperature_cache_expires():
-    with mock("disk.temperature_uncached", return_value=50):
-        call("disk.temperature", "sda")
-
-    time.sleep(3)
-
-    with mock("disk.temperature_uncached", return_value=60):
-        assert call("disk.temperature", "sda", {"cache": 2}) == 60
-
-
-def test_disk_temperatures_only_cached():
-    with mock("disk.temperature_uncached", return_value=50):
-        call("disk.temperature", "sda")
-
-    with mock("disk.temperature_uncached", exception=True):
-        assert call("disk.temperatures", ["sda"], {"only_cached": True}) == {"sda": 50}
-
-
-def test_disk_temperature_alerts():
-    sda_temperature_alert = {
-        "uuid": "a11a16a9-a28b-4005-b11a-bce6af008d86",
-        "source": "",
-        "klass": "SMART",
-        "args": {
-            "device": "/dev/sda",
-            "message": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)"
-        },
-        "node": "Controller A",
-        "key": "{\"device\": \"/dev/sda\", \"message\": \"Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}",
-        "datetime": {
-            "$date": 1657098825510
-        },
-        "last_occurrence": {
-            "$date": 1657185226656
-        },
-        "dismissed": False,
-        "mail": None,
-        "text": "%(message)s.",
-        "id": "a11a16a9-a28b-4005-b11a-bce6af008d86",
-        "level": "CRITICAL",
-        "formatted": "Device: /dev/sda, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).",
-        "one_shot": True,
-    }
-    sdb_temperature_alert = {
-        "uuid": "66e29e1c-2948-4473-928a-3ccf0c0aefa9",
-        "source": "",
-        "klass": "SMART",
-        "args": {
-            "device": "/dev/sdb",
-            "message": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)"
-        },
-        "node": "Controller A",
-        "key": "{\"device\": \"/dev/sdb\", \"message\": \"Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63)\"}",
-        "datetime": {
-            "$date": 1657098825510
-        },
-        "last_occurrence": {
-            "$date": 1657185226656
-        },
-        "dismissed": False,
-        "mail": None,
-        "text": "%(message)s.",
-        "id": "a11a16a9-a28b-4005-b11a-bce6af008d86",
-        "level": "CRITICAL",
-        "formatted": "Device: /dev/sdb, Temperature 60 Celsius reached critical limit of 50 Celsius (Min/Max 25/63).",
-        "one_shot": True,
-    }
-    unrelated_alert = {
-        "uuid": "c371834a-5168-474d-a6d0-9eac02ad29a7",
-        "source": "",
-        "klass": "ScrubStarted",
-        "args": "temp",
-        "node": "Controller A",
-        "key": "\"temp\"",
-        "datetime": {
-            "$date": 1657713495028
-        },
-        "last_occurrence": {
-            "$date": 1657713495028
-        },
-        "dismissed": False,
-        "mail": None,
-        "text": "Scrub of pool %r started.",
-        "id": "c371834a-5168-474d-a6d0-9eac02ad29a7",
-        "level": "INFO",
-        "formatted": "Scrub of pool 'temp' started.",
-        "one_shot": True,
-    }
-
-    with mock("alert.list", return_value=[sda_temperature_alert, sdb_temperature_alert, unrelated_alert]):
-        assert call("disk.temperature_alerts", ["sda"]) == [dict(sda_temperature_alert,
-                                                                 datetime=ANY,
-                                                                 last_occurrence=ANY)]
diff --git a/tests/api2/test_disk_wipe.py b/tests/api2/test_disk_wipe.py
deleted file mode 100644
index a4cc368078a1d..0000000000000
--- a/tests/api2/test_disk_wipe.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import time
-
-import pytest
-
-from auto_config import ha
-from middlewared.test.integration.utils import call, ssh
-
-VMFS_MAGIC_STRING_B64 = "DdABwA=="
-VMFS_MAGIC_STRING_WFS = "VMFS_volume_member"
-
-
-def test_disk_wipe_partition_clean():
-    """Confirm we clean up around the middle partitions"""
-    signal_msg = "ix private data"
-    disk = call("disk.get_unused")[0]["name"]
-
-    # Create a data partition
-    call('disk.format', disk)
-    parts = call('disk.list_partitions', disk)
-    seek_blk = parts[0]['start_sector']
-    blk_size = parts[0]['start'] // parts[0]['start_sector']
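-    # seek_blk is the first sector of the data partition; dividing its byte
-    # offset by the sector index recovers the logical sector size used for the
-    # dd reads and writes below.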
-
-    # Fake a VMFS volume at start of disk
-    ssh(
-        f'echo -n {VMFS_MAGIC_STRING_B64} > vmfs;'
-        f"base64 -d vmfs | dd of=/dev/{disk} bs=1M seek=1 count=1 status=none"
-    )
-    assert VMFS_MAGIC_STRING_WFS in ssh(f"wipefs /dev/{disk}")
-
-    # Write some private data into the start of the data partition
-    ssh(
-        f"echo '{signal_msg}' > junk;"
-        f"dd if=junk bs={blk_size} count=1 oseek={seek_blk} of=/dev/{disk};"
-        "rm -f junk"
-    )
-
-    # Confirm presence of signal_message
-    readback_presence = ssh(f"dd if=/dev/{disk} bs={blk_size} iseek={seek_blk} count=1").splitlines()[0]
-    assert signal_msg in readback_presence
-
-    # Clean the drive
-    call('disk.wipe', disk, 'QUICK', job=True)
-
-    # Confirm it's now clean
-    assert VMFS_MAGIC_STRING_WFS not in ssh(f"wipefs /dev/{disk}")
-    readback_clean = ssh(f"dd if=/dev/{disk} bs={blk_size} iseek={seek_blk} count=1").splitlines()[0]
-    assert signal_msg not in readback_clean
-
-    # Confirm we have no partitions from middleware
-    partitions = call('disk.list_partitions', disk)
-    assert len(partitions) == 0
-
-    # Confirm the kernel partition tables indicate no partitions
-    proc_partitions = str(ssh('cat /proc/partitions'))
-    # If the wipe is truly successful /proc/partitions should have a singular
-    # entry for 'disk' in the table
-    assert len([line for line in proc_partitions.splitlines() if disk in line]) == 1
-
-
-@pytest.mark.parametrize('dev_name', ['BOOT', 'UNUSED', 'bogus', ''])
-def test_disk_get_partitions_quick(dev_name):
-    """
-    dev_name:
-        'BOOT'   - find a proper device that has partitions
-        'UNUSED' - find a proper device that does not have partitions
-    All others are failure tests.  All failures are properly handled
-    and should return an empty dictionary
-    """
-    has_partitions = False
-    if 'BOOT' == dev_name:
-        dev_name = call('boot.get_disks')[0]
-        has_partitions = True
-    elif 'UNUSED' == dev_name:
-        # NOTE: 'unused' disks typically have no partitions
-        dev_name = call('disk.get_unused')[0]['name']
-
-    parts = call('disk.get_partitions_quick', dev_name)
-    assert has_partitions == (len(parts) > 0)
-
-
-def test_disk_wipe_abort():
-    """Test that we can sucessfully abort a disk.wipe job"""
-    expected_pids = set()
-    if ha:
-        # In HA systems fenced may be using the disk.  Obtain the PID
-        # so that we can ignore it.
-        fenced_info = call('failover.fenced.run_info')
-        if fenced_info['running']:
-            expected_pids.add(str(fenced_info['pid']))
-
-    # Obtain a disk to wipe
-    disk = call("disk.get_unused")[0]["name"]
-
-    job_id = call("disk.wipe", disk, "FULL")
-
-    # Wait for wipe process to actually start
-    for i in range(20):
-        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-        if job["progress"]["percent"] > 0:
-            break
-
-        time.sleep(0.1)
-    else:
-        assert False, job
-
-    call("core.job_abort", job_id)
-
-    for i in range(20):
-        result = set(ssh(f"fuser /dev/{disk}", check=False).strip().split())
-        # Check that only the expected PIDs are using the disk
-        # (which means that the abort was completed successfully)
-        if result == expected_pids:
-            # Ensure that the job was aborted before completion
-            job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-            assert job["state"] == "ABORTED"
-            assert job["progress"]["percent"] < 95
-            break
-
-        time.sleep(0.1)
-    else:
-        assert False, result
diff --git a/tests/api2/test_disk_zfs_guid.py b/tests/api2/test_disk_zfs_guid.py
deleted file mode 100644
index e4b305037701d..0000000000000
--- a/tests/api2/test_disk_zfs_guid.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.mock import mock
-from middlewared.test.integration.utils.mock_db import mock_table_contents
-from middlewared.test.integration.utils.time_utils import utc_now
-
-
-DISK_TEMPLATE = {
-    "disk_subsystem": "scsi",
-    "disk_number": 2160,
-    "disk_serial": "",
-    "disk_lunid": None,
-    "disk_size": "17179869184",
-    "disk_description": "",
-    "disk_transfermode": "Auto",
-    "disk_hddstandby": "Always On",
-    "disk_advpowermgmt": "Disabled",
-    "disk_togglesmart": True,
-    "disk_smartoptions": "",
-    "disk_expiretime": None,
-    "disk_enclosure_slot": None,
-    "disk_passwd": "",
-    "disk_critical": None,
-    "disk_difference": None,
-    "disk_informational": None,
-    "disk_model": "VBOX_HARDDISK",
-    "disk_rotationrate": None,
-    "disk_type": "HDD",
-    "disk_kmip_uid": None,
-    "disk_zfs_guid": None,
-    "disk_bus": "ATA"
-}
-
-
-def test_does_not_set_zfs_guid_for_expired_disk():
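-    # Two database rows share the disk name "sda", one of them expired; only the
-    # non-expired row should receive the ZFS GUID reported for that disk.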
-    with mock_table_contents(
-        "storage.disk",
-        [
-            {**DISK_TEMPLATE, "disk_identifier": "{serial}1", "disk_name": "sda", "disk_expiretime": utc_now()},
-            {**DISK_TEMPLATE, "disk_identifier": "{serial}2", "disk_name": "sda"},
-        ],
-    ):
-        with mock("pool.flatten_topology", return_value=[
-            {"type": "DISK", "disk": "sda", "guid": "guid1"},
-        ]):
-            call("disk.sync_zfs_guid", {
-                "topology": "MOCK",
-            })
-
-            assert call(
-                "datastore.query", "storage.disk", [["disk_identifier", "=", "{serial}1"]], {"get": True},
-            )["disk_zfs_guid"] is None
-            assert call(
-                "datastore.query", "storage.disk", [["disk_identifier", "=", "{serial}2"]], {"get": True},
-            )["disk_zfs_guid"] == "guid1"
-
-
-def test_does_not_return_expired_disks_with_same_guid():
-    with mock_table_contents(
-        "storage.disk",
-        [
-            {**DISK_TEMPLATE, "disk_identifier": "{serial}1", "disk_name": "sda", "disk_expiretime": utc_now(),
-             "disk_zfs_guid": "guid1"},
-            {**DISK_TEMPLATE, "disk_identifier": "{serial}2", "disk_name": "sda", "disk_zfs_guid": "guid1"},
-        ]
-    ):
-        assert call("disk.disk_by_zfs_guid", "guid1")["identifier"] == "{serial}2"
diff --git a/tests/api2/test_docker_roles.py b/tests/api2/test_docker_roles.py
deleted file mode 100644
index 97bf6faf39fe3..0000000000000
--- a/tests/api2/test_docker_roles.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, role, valid_role, valid_role_exception', (
-    ('docker.status', 'DOCKER_READ', True, False),
-    ('docker.status', 'DOCKER_WRITE', True, False),
-    ('docker.status', 'CATALOG_READ', False, False),
-    ('docker.config', 'DOCKER_READ', True, False),
-    ('docker.config', 'DOCKER_WRITE', True, False),
-    ('docker.config', 'CATALOG_READ', False, False),
-    ('docker.nvidia_status', 'DOCKER_READ', True, False),
-    ('docker.nvidia_status', 'DOCKER_WRITE', True, False),
-    ('docker.nvidia_status', 'CATALOG_READ', False, False),
-    ('docker.update', 'DOCKER_READ', False, False),
-    ('docker.update', 'DOCKER_WRITE', True, True),
-))
-def test_apps_roles(unprivileged_user_fixture, method, role, valid_role, valid_role_exception):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=valid_role_exception)
diff --git a/tests/api2/test_docker_setup.py b/tests/api2/test_docker_setup.py
deleted file mode 100644
index f956dbc227e6e..0000000000000
--- a/tests/api2/test_docker_setup.py
+++ /dev/null
@@ -1,90 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.docker import docker
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.docker import dataset_props, IX_APPS_MOUNT_PATH
-
-
-ENC_POOL_PASSWORD = 'test1234'
-
-
-@pytest.fixture(scope='module')
-def docker_pool():
-    with another_pool() as pool:
-        with docker(pool) as docker_config:
-            yield docker_config
-
-
-@pytest.fixture(scope='module')
-def docker_encrypted_pool():
-    with another_pool({
-        'name': 'docker_enc_pool',
-        'encryption': True,
-        'encryption_options': {'passphrase': ENC_POOL_PASSWORD}
-    }) as pool:
-        with docker(pool) as docker_config:
-            yield docker_config
-
-
-def test_docker_datasets_properties(docker_pool):
-    docker_config = call('docker.config')
-    datasets = {
-        ds['name']: ds['properties'] for ds in call('zfs.dataset.query', [['id', '^', docker_config['dataset']]])
-    }
-    for ds_name, current_props in datasets.items():
-        invalid_props = {}
-        for to_check_prop, to_check_prop_value in dataset_props(ds_name).items():
-            if current_props[to_check_prop]['value'] != to_check_prop_value:
-                invalid_props[to_check_prop] = current_props[to_check_prop]['value']
-
-        assert invalid_props == {}, f'{ds_name} has invalid properties: {invalid_props}'
-
-
-def test_correct_docker_dataset_is_mounted(docker_pool):
-    docker_config = call('docker.config')
-    assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
-
-
-def test_catalog_synced_properly(docker_pool):
-    assert call('catalog.synced') is True
-
-
-def test_catalog_sync_location(docker_pool):
-    assert call('catalog.config')['location'] == '/mnt/.ix-apps/truenas_catalog'
-
-
-def test_apps_being_reported(docker_pool):
-    assert call('app.available', [], {'count': True}) != 0
-
-
-def test_apps_are_running(docker_pool):
-    assert call('docker.status')['status'] == 'RUNNING'
-
-
-def test_apps_dataset_after_address_pool_update(docker_pool):
-    docker_config = call('docker.update', {'address_pools': [{'base': '172.17.0.0/12', 'size': 27}]}, job=True)
-    assert docker_config['address_pools'] == [{'base': '172.17.0.0/12', 'size': 27}]
-    assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
-    assert call('docker.status')['status'] == 'RUNNING'
-
-
-def test_correct_docker_dataset_is_mounted_on_enc_pool(docker_encrypted_pool):
-    docker_config = call('docker.config')
-    assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
-
-
-def test_docker_locked_dataset_mount(docker_encrypted_pool):
-    docker_config = call('docker.config')
-    call('pool.dataset.lock', docker_encrypted_pool['pool'], job=True)
-    assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] != docker_config['dataset']
-
-
-def test_docker_unlocked_dataset_mount(docker_encrypted_pool):
-    docker_config = call('docker.config')
-    call(
-        'pool.dataset.unlock', docker_encrypted_pool['pool'], {
-            'datasets': [{'passphrase': ENC_POOL_PASSWORD, 'name': docker_encrypted_pool['pool']}], 'recursive': True
-        }, job=True
-    )
-    assert call('filesystem.statfs', IX_APPS_MOUNT_PATH)['source'] == docker_config['dataset']
diff --git a/tests/api2/test_draid.py b/tests/api2/test_draid.py
deleted file mode 100644
index e8c217427da6f..0000000000000
--- a/tests/api2/test_draid.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import pytest
-
-from truenas_api_client import ValidationErrors
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-
-
-POOL_NAME = 'test_draid_pool'
-
-
-@pytest.mark.parametrize(
-    'n_data,n_spare,n_parity', [
-        (1, 0, 1),
-        (1, 1, 1),
-        (1, 0, 2),
-        (1, 1, 2),
-        (2, 2, 2),
-        (1, 1, 3),
-    ]
-)
-def test_valid_draid_pool_creation(n_data, n_spare, n_parity):
-    unused_disks = call('disk.get_unused')
-    if len(unused_disks) < 5:
-        pytest.skip('Insufficient number of disks to perform this test')
-
-    children = n_data + n_parity + n_spare
-    with another_pool({
-        'name': POOL_NAME,
-        'topology': {
-            'data': [{
-                'disks': [disk['name'] for disk in unused_disks[:children]],
-                'type': f'DRAID{n_parity}',
-                'draid_data_disks': n_data,
-                'draid_spare_disks': n_spare
-            }],
-        },
-        'allow_duplicate_serials': True,
-    }) as draid:
-        assert draid['topology']['data'][0]['name'] == f'draid{n_parity}:{n_data}d:{children}c:{n_spare}s-0'
-        unused_disk_for_update = call('disk.get_unused')
-        if len(unused_disk_for_update) >= children:
-            draid_pool_updated = call(
-                'pool.update', draid['id'], {
-                    'topology': {
-                        'data': [{
-                            'type': f'DRAID{n_parity}',
-                            'disks': [disk['name'] for disk in unused_disk_for_update[:children]],
-                            'draid_data_disks': n_data,
-                            'draid_spare_disks': n_spare
-                        }]
-                    },
-                    'allow_duplicate_serials': True,
-                }, job=True)
-            assert len(draid_pool_updated['topology']['data']) == 2
-            assert draid_pool_updated['topology']['data'][1]['name'] == f'draid{n_parity}:{n_data}d:{children}c' \
-                                                                        f':{n_spare}s-1'
-
-
-@pytest.mark.parametrize(
-    'n_data,n_spare,n_parity,minimum_disk', [
-        (0, 0, 1, 2),
-        (0, 2, 1, 2),
-        (0, 0, 2, 3),
-        (0, 0, 3, 4),
-        (0, 2, 1, 2),
-        (0, 2, 2, 3),
-    ]
-)
-def test_invalid_draid_pool_creation(n_data, n_spare, n_parity, minimum_disk):
-    unused_disks = call('disk.get_unused')
-    if len(unused_disks) < 3:
-        pytest.skip('Insufficient number of disks to perform this test')
-
-    children = n_data + n_parity + n_spare
-
-    with pytest.raises(ValidationErrors) as ve:
-        call('pool.create', {
-            'name': POOL_NAME,
-            'topology': {
-                'data': [{
-                    'disks': [disk['name'] for disk in unused_disks[:children]],
-                    'type': f'DRAID{n_parity}',
-                    'draid_data_disks': n_data,
-                    'draid_spare_disks': n_spare,
-                }],
-            },
-            'allow_duplicate_serials': True,
-        }, job=True)
-
-    if n_spare:
-        assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.type'
-        assert ve.value.errors[0].errmsg == f'Requested number of dRAID data disks per group {n_data}' \
-                                            f' is too high, at most {children - n_spare - n_parity}' \
-                                            f' disks are available for data'
-    else:
-        assert ve.value.errors[0].attribute == 'pool_create.topology.data.0.disks'
-        assert ve.value.errors[0].errmsg == f'You need at least {minimum_disk} disk(s) for this vdev type.'
diff --git a/tests/api2/test_draid_record_and_block_size.py b/tests/api2/test_draid_record_and_block_size.py
deleted file mode 100644
index b2dc129d79e49..0000000000000
--- a/tests/api2/test_draid_record_and_block_size.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-
-from auto_config import ha
-
-
-@pytest.fixture(scope='module')
-def check_unused_disks():
-    if len(call('disk.get_unused')) < 4:
-        pytest.skip('Insufficient number of disks to perform these tests')
-
-
-@pytest.fixture(scope='module')
-def draid_pool():
-    unused_disks = call('disk.get_unused')
-    with another_pool({
-        'name': 'test_draid_pool',
-        'topology': {
-            'data': [{
-                'disks': [disk['name'] for disk in unused_disks[:2]],
-                'type': 'DRAID1',
-                'draid_data_disks': 1
-            }],
-        },
-        'allow_duplicate_serials': True,
-    }) as pool_name:
-        yield pool_name
-
-
-@pytest.fixture(scope='module')
-def mirror_pool():
-    unused_disks = call('disk.get_unused')
-    with another_pool({
-        'name': 'test_mirror_pool',
-        'topology': {
-            'data': [{
-                'disks': [disk['name'] for disk in unused_disks[:2]],
-                'type': 'MIRROR',
-            }],
-        },
-        'allow_duplicate_serials': True,
-    }) as pool_name:
-        yield pool_name
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'record_size', ['1M']
-)
-def test_draid_pool_default_record_size(draid_pool, record_size):
-    assert call('pool.dataset.get_instance', draid_pool['name'])['recordsize']['value'] == record_size
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'record_size', ['128K']
-)
-def test_non_draid_pool_default_record_size(mirror_pool, record_size):
-    assert call('pool.dataset.get_instance', mirror_pool['name'])['recordsize']['value'] == record_size
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'update_recordsize, validation_error', [
-        ('512K', False),
-        ('256K', False),
-        ('128K', False),
-        ('2M', False),
-        ('512', True),
-        ('4K', True),
-        ('64K', True),
-    ]
-)
-def test_draid_root_dataset_valid_recordsize(draid_pool, update_recordsize, validation_error):
-    if not validation_error:
-        assert call(
-            'pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize}
-        )['recordsize']['value'] == update_recordsize
-    else:
-        with pytest.raises(ValidationErrors) as ve:
-            call('pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize})
-
-        assert ve.value.errors[0].attribute == 'pool_dataset_update.recordsize'
-        assert ve.value.errors[0].errmsg == f"'{update_recordsize}' is an invalid recordsize."
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'update_recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K']
-)
-def test_non_draid_root_dataset_valid_recordsize(mirror_pool, update_recordsize):
-    assert call(
-        'pool.dataset.update', mirror_pool['name'], {'recordsize': update_recordsize}
-    )['recordsize']['value'] == update_recordsize
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'recordsize, validation_error', [
-        ('512K', False),
-        ('256K', False),
-        ('128K', False),
-        ('2M', False),
-        ('512', True),
-        ('4K', True),
-        ('64K', True),
-    ]
-)
-def test_draid_dataset_valid_recordsize(draid_pool, recordsize, validation_error):
-    if not validation_error:
-        assert call(
-            'pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset_{recordsize}', 'recordsize': recordsize}
-        )['recordsize']['value'] == recordsize
-    else:
-        with pytest.raises(ValidationErrors) as ve:
-            call('pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset_{recordsize}',
-                                         'recordsize': recordsize})
-
-        assert ve.value.errors[0].attribute == 'pool_dataset_create.recordsize'
-        assert ve.value.errors[0].errmsg == f"'{recordsize}' is an invalid recordsize."
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'recordsize', ['512K', '256K', '128K', '2M', '512', '4K', '64K']
-)
-def test_non_draid_dataset_valid_recordsize(mirror_pool, recordsize):
-    assert call(
-        'pool.dataset.create', {'name': f'{mirror_pool["name"]}/test_dataset_{recordsize}', 'recordsize': recordsize}
-    )['recordsize']['value'] == recordsize
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'blocksize,validation_error', [
-        ('16K', True),
-        ('32K', False),
-    ]
-)
-def test_draid_zvol_valid_blocksize(draid_pool, blocksize, validation_error):
-    if not validation_error:
-        assert call(
-            'pool.dataset.create', {
-                'name': f'{draid_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224,
-                'volblocksize': blocksize, 'type': 'VOLUME',
-            }
-        )['volblocksize']['value'] == blocksize
-    else:
-        with pytest.raises(ValidationErrors) as ve:
-            call(
-                'pool.dataset.create', {
-                    'name': f'{draid_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224,
-                    'volblocksize': blocksize, 'type': 'VOLUME'
-                }
-            )
-
-        assert ve.value.errors[0].attribute == 'pool_dataset_create.volblocksize'
-        assert ve.value.errors[0].errmsg == 'Volume block size must be greater than or equal to 32K for dRAID pools'
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'blocksize', ['16K', '32K']
-)
-def test_non_draid_zvol_valid_blocksize(mirror_pool, blocksize):
-    assert call(
-        'pool.dataset.create', {
-            'name': f'{mirror_pool["name"]}/test_dataset_{blocksize}', 'volsize': 268468224,
-            'volblocksize': blocksize, 'type': 'VOLUME',
-        }
-    )['volblocksize']['value'] == blocksize
-
-
-@pytest.mark.usefixtures('check_unused_disks')
-@pytest.mark.parametrize(
-    'update_recordsize, default_record_size', [
-        ('512K', '1M'),
-    ]
-)
-def test_draid_dataset_default_recordsize(draid_pool, update_recordsize, default_record_size):
-    assert call(
-        'pool.dataset.update', draid_pool['name'], {'recordsize': update_recordsize}
-    )['recordsize']['value'] == update_recordsize
-
-    assert call(
-        'pool.dataset.create', {'name': f'{draid_pool["name"]}/test_dataset'}
-    )['recordsize']['value'] == default_record_size
diff --git a/tests/api2/test_enable_disable_services.py b/tests/api2/test_enable_disable_services.py
deleted file mode 100644
index bd20cdee12993..0000000000000
--- a/tests/api2/test_enable_disable_services.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_01_enable_services():
-    for svc in filter(lambda x: not x['enable'], call('service.query')):
-        call('service.update', svc['id'], {'enable': True})
-
-
-def test_02_disable_services():
-    for svc in filter(lambda x: x['enable'], call('service.query')):
-        call('service.update', svc['id'], {'enable': False})
diff --git a/tests/api2/test_encrypted_dataset_services_restart.py b/tests/api2/test_encrypted_dataset_services_restart.py
deleted file mode 100644
index 760de61f2c214..0000000000000
--- a/tests/api2/test_encrypted_dataset_services_restart.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import contextlib
-
-import pytest
-from pytest_dependency import depends
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import dataset
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-PASSPHRASE = 'testing123'
-
-
-@contextlib.contextmanager
-def enable_auto_start(service_name):
-    service = call('service.query', [['service', '=', service_name]], {'get': True})
-    try:
-        yield call('service.update', service['id'], {'enable': True})
-    finally:
-        call('service.update', service['id'], {'enable': False})
-
-
-@contextlib.contextmanager
-def start_service(service_name):
-    try:
-        yield call('service.start', service_name)
-    finally:
-        call('service.stop', service_name)
-
-
-@contextlib.contextmanager
-def lock_dataset(dataset_name):
-    try:
-        yield call('pool.dataset.lock', dataset_name, {'force_umount': True}, job=True)
-    finally:
-        call(
-            'pool.dataset.unlock', dataset_name, {
-                'datasets': [{'passphrase': PASSPHRASE, 'name': dataset_name}]
-            },
-            job=True,
-        )
-
-
-def test_service_restart_on_unlock_dataset(request):
-    service_name = 'smb'
-    registered_name = 'cifs'
-    with dataset('testsvcunlock', data={
-        'encryption': True,
-        'encryption_options': {
-            'algorithm': 'AES-256-GCM',
-            'pbkdf2iters': 350000,
-            'passphrase': PASSPHRASE,
-        },
-        'inherit_encryption': False
-    }) as ds:
-        path = f'/mnt/{ds}'
-        share = call(f'sharing.{service_name}.create', {'path': path, 'name': 'smb-dataset'})
-        assert share['locked'] is False
-
-        with start_service(registered_name) as service_started:
-            assert service_started is True
-
-            call('service.stop', registered_name)
-            assert call('service.started', registered_name) is False
-            with enable_auto_start(registered_name):
-                with lock_dataset(ds):
-                    assert call(f'sharing.{service_name}.get_instance', share['id'])['locked'] is True
-                    assert call('service.started', registered_name) is False
-
-                assert call(f'sharing.{service_name}.get_instance', share['id'])['locked'] is False
-                assert call('service.started', registered_name) is True
diff --git a/tests/api2/test_events.py b/tests/api2/test_events.py
deleted file mode 100644
index 23b6ab81723f4..0000000000000
--- a/tests/api2/test_events.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.utils import client
-
-
-def test_can_subscribe_to_failover_status_event_without_authorization():
-    with client(auth=None) as c:
-        c.subscribe("failover.status", lambda *args, **kwargs: None)
-
-
-def test_can_not_subscribe_to_an_event_without_authorization():
-    with client(auth=None) as c:
-        with pytest.raises(CallError) as ve:
-            c.subscribe("core.get_jobs", lambda *args, **kwargs: None)
-
-        assert ve.value.errno == errno.EACCES
diff --git a/tests/api2/test_fibre_channel.py b/tests/api2/test_fibre_channel.py
deleted file mode 100644
index f878f4d003e8a..0000000000000
--- a/tests/api2/test_fibre_channel.py
+++ /dev/null
@@ -1,577 +0,0 @@
-import contextlib
-import copy
-import errno
-
-import pytest
-from assets.websocket.iscsi import target, target_extent_associate, zvol_extent
-from auto_config import ha, pool_name
-
-from middlewared.service_exception import ValidationError, ValidationErrors
-from middlewared.test.integration.utils import call, mock, ssh
-
-MB = 1024 * 1024
-
-NODE_A_0_WWPN = '0x210000aaaaaaaa01'
-NODE_A_0_WWPN_NPIV_1 = '0x220000aaaaaaaa01'
-NODE_A_0 = {
-    'name': 'host14',
-    'path': '/sys/class/fc_host/host14',
-    'node_name': '0x200000aaaaaaaa01',
-    'port_name': NODE_A_0_WWPN,
-    'port_type': 'NPort (fabric via point-to-point)',
-    'port_state': 'Online',
-    'speed': '8 Gbit',
-    'addr': 'pci0000:b2/0000:b2:00.0/0000:b3:00.0',
-    'max_npiv_vports': 254,
-    'npiv_vports_inuse': 0,
-    'physical': True,
-    'slot': 'CPU SLOT4 PCI-E 3.0 X16 / PCI Function 0'
-}
-
-NODE_A_1_WWPN = '0x210000aaaaaaaa02'
-NODE_A_1 = {
-    'name': 'host16',
-    'path': '/sys/class/fc_host/host16',
-    'node_name': '0x200000aaaaaaaa02',
-    'port_name': NODE_A_1_WWPN,
-    'port_type': 'NPort (fabric via point-to-point)',
-    'port_state': 'Online',
-    'speed': '8 Gbit',
-    'addr': 'pci0000:b2/0000:b2:00.0/0000:b3:00.1',
-    'max_npiv_vports': 254,
-    'npiv_vports_inuse': 0,
-    'physical': True,
-    'slot': 'CPU SLOT4 PCI-E 3.0 X16 / PCI Function 1'
-}
-
-NODE_A_FC_PHYSICAL_PORTS = [NODE_A_0, NODE_A_1]
-
-NODE_B_0_WWPN = '0x210000bbbbbbbb01'
-NODE_B_0_WWPN_NPIV_1 = '0x220000bbbbbbbb01'
-NODE_B_0 = {
-    'name': 'host14',
-    'path': '/sys/class/fc_host/host14',
-    'node_name': '0x200000bbbbbbbb01',
-    'port_name': NODE_B_0_WWPN,
-    'port_type': 'NPort (fabric via point-to-point)',
-    'port_state': 'Online',
-    'speed': '8 Gbit',
-    'addr': 'pci0000:b2/0000:b2:00.0/0000:b3:00.0',
-    'max_npiv_vports': 254,
-    'npiv_vports_inuse': 0,
-    'physical': True,
-    'slot': 'CPU SLOT4 PCI-E 3.0 X16 / PCI Function 0'
-}
-
-NODE_B_1_WWPN = '0x210000bbbbbbbb02'
-NODE_B_1 = {
-    'name': 'host16',
-    'path': '/sys/class/fc_host/host16',
-    'node_name': '0x200000bbbbbbbb02',
-    'port_name': NODE_B_1_WWPN,
-    'port_type': 'NPort (fabric via point-to-point)',
-    'port_state': 'Online',
-    'speed': '8 Gbit',
-    'addr': 'pci0000:b2/0000:b2:00.0/0000:b3:00.1',
-    'max_npiv_vports': 254,
-    'npiv_vports_inuse': 0,
-    'physical': True,
-    'slot': 'CPU SLOT4 PCI-E 3.0 X16 / PCI Function 1'
-}
-
-NODE_B_FC_PHYSICAL_PORTS = [NODE_B_0, NODE_B_1]
-
-
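-# Helpers to convert '0x...' hex WWPN strings into the 'naa....' form returned by
-# the fc.fc_host queries and into the colon-separated hex form used as target names
-# in the generated scst.conf.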
-def _str_to_naa(string):
-    if isinstance(string, str):
-        if string.startswith('0x'):
-            return 'naa.' + string[2:]
-
-
-def str_to_wwpn_naa(string):
-    return _str_to_naa(string)
-
-
-def str_to_wwpn_b_naa(string):
-    if ha:
-        return _str_to_naa(string)
-
-
-def str_to_colon_hex(string):
-    if isinstance(string, str) and string.startswith('0x'):
-        # range(2,) to skip the leading 0x
-        return ':'.join(string[i:i + 2] for i in range(2, len(string), 2))
-
-
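-# Minimal parsers for the generated /etc/scst.conf: walk the file line by line and,
-# for each TARGET block under the requested TARGET_DRIVER section, collect its LUN
-# assignments and key/value attributes.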
-def parse_values(lines):
-    values = {'LUN': {}}
-    while lines:
-        line = lines.pop(0).strip()
-        if line == '}':
-            return values
-        elif line == '' or line.startswith('#'):
-            continue
-        sline = line.split()
-        if sline[0] == 'LUN':
-            values['LUN'][sline[1]] = sline[2]
-        elif len(sline) == 2:
-            values[sline[0]] = sline[1]
-
-
-def parse_targets(lines):
-    targets = {}
-    while lines:
-        line = lines.pop(0).strip()
-        if line.startswith('TARGET '):
-            ident = line.split()[1]
-            targets[ident] = parse_values(lines)
-        elif line == '}':
-            return targets
-
-
-def parse_target_driver(target_driver, lines):
-    needle = f'TARGET_DRIVER {target_driver} ' + '{'
-    while lines:
-        line = lines.pop(0).strip()
-        if line == needle:
-            targets = parse_targets(lines)
-            return targets
-
-
-def parse_qla2x00t(lines):
-    return parse_target_driver('qla2x00t', copy.copy(lines))
-
-
-def parse_iscsi(lines):
-    return parse_target_driver('iscsi', copy.copy(lines))
-
-
-@contextlib.contextmanager
-def zvol(name, volsizeMB):
-    payload = {
-        'name': f'{pool_name}/{name}',
-        'type': 'VOLUME',
-        'volsize': volsizeMB * MB,
-        'volblocksize': '16K'
-    }
-    config = call('pool.dataset.create', payload)
-    try:
-        yield config
-    finally:
-        call('pool.dataset.delete', config['id'])
-
-
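-# Attach a LUN to an existing target: create a zvol, wrap it in an extent, then
-# associate the extent with the target at the requested LUN number.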
-@contextlib.contextmanager
-def target_lun(target_config, zvol_name, mb, lun):
-    with zvol(zvol_name, mb) as zvol_config:
-        with zvol_extent(zvol_config['id'], zvol_name) as extent_config:
-            with target_extent_associate(target_config['id'], extent_config['id'], lun) as associate_config:
-                yield {
-                    'target': target_config,
-                    'zvol': zvol_config,
-                    'extent': extent_config,
-                    'associate': associate_config
-                }
-
-
-@contextlib.contextmanager
-def target_lun_zero(target_name, zvol_name, mb):
-    with target(target_name, []) as target_config:
-        with target_lun(target_config, zvol_name, mb, 0) as config:
-            yield config
-
-
-@contextlib.contextmanager
-def node_a_hardware(remote=False):
-    with mock('fc.fc_hosts', return_value=NODE_A_FC_PHYSICAL_PORTS, remote=remote):
-        physical_port_filter = [['physical', '=', True]]
-        with mock('fc.fc_hosts', args=physical_port_filter, return_value=NODE_A_FC_PHYSICAL_PORTS, remote=remote):
-            yield
-
-
-@contextlib.contextmanager
-def node_b_hardware(remote=False):
-    with mock('fc.fc_hosts', return_value=NODE_B_FC_PHYSICAL_PORTS, remote=remote):
-        physical_port_filter = [['physical', '=', True]]
-        with mock('fc.fc_hosts', args=physical_port_filter, return_value=NODE_B_FC_PHYSICAL_PORTS, remote=remote):
-            yield
-
-
-@contextlib.contextmanager
-def fcport_create(alias, target_id):
-    config = call('fcport.create', {'port': alias, 'target_id': target_id})
-    try:
-        yield config
-    finally:
-        call('fcport.delete', config['id'])
-
-
-class TestFixtureFibreChannel:
-    """Fixture with Fibre Channel"""
-
-    @pytest.fixture(scope='class')
-    def fibre_channel_hardware(self):
-        # Make sure iSCSI service is not running.  Would go boom
-        assert call('service.query', [['service', '=', 'iscsitarget']], {'get': True})['state'] == 'STOPPED'
-        with mock('fc.capable', return_value=True):
-            with mock('system.feature_enabled', args=['FIBRECHANNEL',], return_value=True):
-                call('fc.fc_host.reset_wired', True)
-                if ha:
-                    node = call('failover.node')
-                    if node == 'A':
-                        with node_a_hardware():
-                            with node_b_hardware(True):
-                                yield
-                    else:
-                        with node_a_hardware(True):
-                            with node_b_hardware():
-                                yield
-                else:
-                    with node_a_hardware():
-                        yield
-
-    @pytest.fixture(scope='class')
-    def fibre_channel_wired(self, fibre_channel_hardware):
-        """
-        Wire the mocked FC ports together.
-
-        Note that this will only work once during a middleware run.
-        (There are some exceptions, but these don't apply to CI.)
-        """
-        assert call('fcport.query') == []
-        try:
-            yield
-        finally:
-            for fc in call('fc.fc_host.query'):
-                call('fc.fc_host.delete', fc['id'])
-
-    @pytest.fixture(scope='class')
-    def fc_hosts(self, fibre_channel_wired):
-        yield sorted(call('fc.fc_host.query'), key=lambda d: d['alias'])
-
-    def assert_fc_host(self, fc_host, alias, wwpn, wwpn_b, npiv):
-        assert fc_host['alias'] == alias
-        assert fc_host['wwpn'] == str_to_wwpn_naa(wwpn)
-        if wwpn_b is None:
-            assert fc_host['wwpn_b'] is None
-        else:
-            assert fc_host['wwpn_b'] == str_to_wwpn_b_naa(wwpn_b)
-        assert fc_host['npiv'] == npiv
-
-    def test_wired(self, fc_hosts):
-        assert len(fc_hosts) == 2
-        if ha:
-            self.assert_fc_host(fc_hosts[0], 'fc0', NODE_A_0_WWPN, NODE_B_0_WWPN, 0)
-            self.assert_fc_host(fc_hosts[1], 'fc1', NODE_A_1_WWPN, NODE_B_1_WWPN, 0)
-        else:
-            self.assert_fc_host(fc_hosts[0], 'fc0', NODE_A_0_WWPN, None, 0)
-            self.assert_fc_host(fc_hosts[1], 'fc1', NODE_A_1_WWPN, None, 0)
-        self.fc_hosts = fc_hosts
-
-    def test_target(self, fc_hosts):
-        with target_lun_zero('fctarget0', 'fcextent0', 100) as config:
-            target_id = config['target']['id']
-
-            # The target was created with mode ISCSI.  Ensure we can't use that.
-            with pytest.raises(ValidationErrors) as ve:
-                call('fcport.create', {'port': fc_hosts[0]['alias'], 'target_id': target_id})
-            assert ve.value.errors == [
-                ValidationError(
-                    'fcport_create.target_id',
-                    f'Specified target "fctarget0" ({target_id}) does not have a "mode" (ISCSI) that permits FC access',
-                    errno.EINVAL,
-                )
-            ]
-
-            # Change the mode of the target
-            call('iscsi.target.update', target_id, {'mode': 'FC'})
-
-            # Now we should be able to successfully map the target
-            with fcport_create(fc_hosts[0]['alias'], target_id) as map0:
-                maps = call('fcport.query')
-                assert len(maps) == 1
-                assert maps[0] == map0
-
-                # Let's generate the /etc/scst.conf and make sure it looks OK
-                call('etc.generate', 'scst')
-                lines = ssh("cat /etc/scst.conf").splitlines()
-                scst_qla_targets = parse_qla2x00t(lines)
-                # The 2nd physical port will also be written, albeit disabled
-                assert len(scst_qla_targets) == 2
-                rel_tgt_id_node_offset = 0
-                if ha:
-                    node = call('failover.node')
-                    if node == 'A':
-                        key0 = str_to_colon_hex(NODE_A_0_WWPN)
-                        key1 = str_to_colon_hex(NODE_A_1_WWPN)
-                    else:
-                        key0 = str_to_colon_hex(NODE_B_0_WWPN)
-                        key1 = str_to_colon_hex(NODE_B_1_WWPN)
-                        if call('iscsi.global.alua_enabled'):
-                            rel_tgt_id_node_offset = 32000
-                else:
-                    key0 = str_to_colon_hex(NODE_A_0_WWPN)
-                    key1 = str_to_colon_hex(NODE_A_1_WWPN)
-                assert key0 in scst_qla_targets
-                assert scst_qla_targets[key0] == {
-                    'LUN': {'0': 'fcextent0'},
-                    'enabled': '1',
-                    'rel_tgt_id': str(5001 + rel_tgt_id_node_offset)
-                }
-                assert key1 in scst_qla_targets
-                assert scst_qla_targets[key1] == {
-                    'LUN': {},
-                    'enabled': '0',
-                    'rel_tgt_id': '10000'
-                }
-
-                # OK, now let's create another FC target
-                with target_lun_zero('fctarget2', 'fcextent2', 200) as config2:
-                    target2_id = config2['target']['id']
-                    # Change the mode of the target
-                    call('iscsi.target.update', target2_id, {'mode': 'BOTH'})
-
-                    # Make sure we can't create a new fcport using the in-use port
-                    with pytest.raises(ValidationErrors) as ve:
-                        call('fcport.create', {'port': fc_hosts[0]['alias'], 'target_id': target2_id})
-                    assert ve.value.errors == [
-                        ValidationError(
-                            'fcport_create.port',
-                            'Object with this port already exists',
-                            errno.EINVAL,
-                        )
-                    ]
-
-                    # Make sure we can't create a new fcport using the in-use target
-                    with pytest.raises(ValidationErrors) as ve:
-                        call('fcport.create', {'port': fc_hosts[1]['alias'], 'target_id': target_id})
-                    assert ve.value.errors == [
-                        ValidationError(
-                            'fcport_create.target_id',
-                            'Object with this target_id already exists',
-                            errno.EINVAL,
-                        )
-                    ]
-
-                    # OK, now map the 2nd target
-                    with fcport_create(fc_hosts[1]['alias'], target2_id) as map1:
-                        maps = call('fcport.query')
-                        assert len(maps) == 2
-                        assert (maps[0] == map0 and maps[1] == map1) or (maps[0] == map1 and maps[1] == map0)
-
-                        # Let's regenerate the /etc/scst.conf and just make sure it has the expected targets
-                        call('etc.generate', 'scst')
-                        lines = ssh("cat /etc/scst.conf").splitlines()
-                        # Check FC targets
-                        scst_qla_targets = parse_qla2x00t(lines)
-                        assert len(scst_qla_targets) == 2
-                        assert key0 in scst_qla_targets
-                        assert scst_qla_targets[key0] == {
-                            'LUN': {'0': 'fcextent0'},
-                            'enabled': '1',
-                            'rel_tgt_id': str(5001 + rel_tgt_id_node_offset)
-                        }
-                        assert key1 in scst_qla_targets
-                        assert scst_qla_targets[key1] == {
-                            'LUN': {'0': 'fcextent2'},
-                            'enabled': '1',
-                            'rel_tgt_id': str(5002 + rel_tgt_id_node_offset)
-                        }
-                        # Check iSCSI target
-                        iqn2 = 'iqn.2005-10.org.freenas.ctl:fctarget2'
-                        iscsi_targets = parse_iscsi(lines)
-                        assert len(iscsi_targets) == 1
-                        assert iqn2 in iscsi_targets
-                        assert iscsi_targets[iqn2] == {
-                            'LUN': {'0': 'fcextent2'},
-                            'rel_tgt_id': str(2 + rel_tgt_id_node_offset),
-                            'enabled': '1',
-                            'per_portal_acl': '1'
-                        }
-
-                        # Make sure we can't update the old fcport using the in-use port
-                        with pytest.raises(ValidationErrors) as ve:
-                            call('fcport.update', map0['id'], {'port': fc_hosts[1]['alias']})
-                        assert ve.value.errors == [
-                            ValidationError(
-                                'fcport_update.port',
-                                'Object with this port already exists',
-                                errno.EINVAL,
-                            )
-                        ]
-
-                        # Make sure we can't update the old fcport using the in-use target
-                        with pytest.raises(ValidationErrors) as ve:
-                            call('fcport.update', map0['id'], {'target_id': target2_id})
-                        assert ve.value.errors == [
-                            ValidationError(
-                                'fcport_update.target_id',
-                                'Object with this target_id already exists',
-                                errno.EINVAL,
-                            )
-                        ]
-
-                        # OK, now let's create a third FC target
-                        with target_lun_zero('fctarget3', 'fcextent3', 300) as config3:
-                            target3_id = config3['target']['id']
-                            call('iscsi.target.update', target3_id, {'mode': 'FC'})
-
-                            # Make sure we CAN update the old fcport to this target
-                            assert call('fcport.update', map0['id'], {'target_id': target3_id})['target']['id'] == target3_id
-                            # Then put it back
-                            assert call('fcport.update', map0['id'], {'target_id': target_id})['target']['id'] == target_id
-
-                    # We've just left the context where the 2nd fcport was created
-                    # So now ensure we CAN update the old fcport to this port
-                    assert call('fcport.update', map0['id'], {'port': fc_hosts[1]['alias']})['port'] == fc_hosts[1]['alias']
-                    # Then put it back
-                    assert call('fcport.update', map0['id'], {'port': fc_hosts[0]['alias']})['port'] == fc_hosts[0]['alias']
-
-    def test_npiv_setting(self, fc_hosts):
-        # Try to set NPIV to -1
-        with pytest.raises(ValidationErrors) as ve:
-            call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': -1})
-        assert ve.value.errors == [
-            ValidationError(
-                'fc_host_update.npiv',
-                'Invalid npiv (-1) supplied, must be 0 or greater',
-                errno.EINVAL,
-            )
-        ]
-
-        # Try to set NPIV to too large a value (3000)
-        with pytest.raises(ValidationErrors) as ve:
-            call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 3000})
-        assert ve.value.errors == [
-            ValidationError(
-                'fc_host_update.npiv',
-                'Invalid npiv (3000) supplied, max value 254',
-                errno.EINVAL,
-            )
-        ]
-
-        # Make sure fcport.port_choices looks correct
-        assert call('fcport.port_choices') == {
-            'fc0': {
-                'wwpn': str_to_wwpn_naa(NODE_A_0_WWPN),
-                'wwpn_b': str_to_wwpn_b_naa(NODE_B_0_WWPN)
-            },
-            'fc1': {
-                'wwpn': str_to_wwpn_naa(NODE_A_1_WWPN),
-                'wwpn_b': str_to_wwpn_b_naa(NODE_B_1_WWPN)
-            }
-        }
-
-        # Now set it to a valid value (4)
-        call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 4})
-
-        # Read things back with a couple of queries to test those.
-        fc0 = call('fc.fc_host.query', [['wwpn', '=', str_to_wwpn_naa(NODE_A_0_WWPN)]], {'get': True})
-        assert fc0['npiv'] == 4
-        if ha:
-            fc1 = call('fc.fc_host.query', [['wwpn_b', '=', str_to_wwpn_b_naa(NODE_B_1_WWPN)]], {'get': True})
-            assert fc1['npiv'] == 0
-        else:
-            fc1 = call('fc.fc_host.query', [['wwpn', '=', str_to_wwpn_naa(NODE_A_1_WWPN)]], {'get': True})
-            assert fc1['npiv'] == 0
-
-        # Increase to a valid value (5)
-        call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 5})
-        fc0 = call('fc.fc_host.query', [['alias', '=', 'fc0']], {'get': True})
-        assert fc0['npiv'] == 5
-
-        # Reduce to a valid value (1)
-        call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 1})
-        fc0 = call('fc.fc_host.query', [['wwpn', '=', str_to_wwpn_naa(NODE_A_0_WWPN)]], {'get': True})
-        assert fc0['npiv'] == 1
-
-        # Make sure fcport.port_choices looks correct
-        assert call('fcport.port_choices') == {
-            'fc0': {
-                'wwpn': str_to_wwpn_naa(NODE_A_0_WWPN),
-                'wwpn_b': str_to_wwpn_b_naa(NODE_B_0_WWPN)
-            },
-            'fc0/1': {
-                'wwpn': str_to_wwpn_naa(NODE_A_0_WWPN_NPIV_1),
-                'wwpn_b': str_to_wwpn_b_naa(NODE_B_0_WWPN_NPIV_1)
-            },
-            'fc1': {
-                'wwpn': str_to_wwpn_naa(NODE_A_1_WWPN),
-                'wwpn_b': str_to_wwpn_b_naa(NODE_B_1_WWPN)
-            }
-        }
-
-        with target_lun_zero('fctarget1', 'fcextent1', 100) as config:
-            # The target was created as an iSCSI-only target and cannot be mapped
-            # to an FC port as-is, so switch its mode to BOTH before mapping it.
-            target_id = config['target']['id']
-            call('iscsi.target.update', target_id, {'mode': 'BOTH'})
-
-            with fcport_create('fc0/1', target_id):
-                # Check that we can NOT now reduce npiv to zero
-                with pytest.raises(ValidationErrors) as ve:
-                    call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 0})
-                assert ve.value.errors == [
-                    ValidationError(
-                        'fc_host_update.npiv',
-                        'Invalid npiv (0) supplied, fc0/1 is currently mapped to a target',
-                        errno.EINVAL,
-                    )
-                ]
-
-                # Let's also make sure that the /etc/scst.conf looks right when an
-                # NPIV mapped target is present
-                call('etc.generate', 'scst')
-                lines = ssh("cat /etc/scst.conf").splitlines()
-                scst_qla_targets = parse_qla2x00t(lines)
-                assert len(scst_qla_targets) == 3
-                rel_tgt_id_node_offset = 0
-                if ha:
-                    node = call('failover.node')
-                    if node == 'A':
-                        key0 = str_to_colon_hex(NODE_A_0_WWPN)
-                        key1 = str_to_colon_hex(NODE_A_1_WWPN)
-                        key2 = str_to_colon_hex(NODE_A_0_WWPN_NPIV_1)
-                    else:
-                        key0 = str_to_colon_hex(NODE_B_0_WWPN)
-                        key1 = str_to_colon_hex(NODE_B_1_WWPN)
-                        key2 = str_to_colon_hex(NODE_B_0_WWPN_NPIV_1)
-                        if call('iscsi.global.alua_enabled'):
-                            rel_tgt_id_node_offset = 32000
-                else:
-                    key0 = str_to_colon_hex(NODE_A_0_WWPN)
-                    key1 = str_to_colon_hex(NODE_A_1_WWPN)
-                    key2 = str_to_colon_hex(NODE_A_0_WWPN_NPIV_1)
-                assert key0 in scst_qla_targets
-                assert scst_qla_targets[key0] == {
-                    'LUN': {},
-                    'enabled': '0',
-                    'rel_tgt_id': '10000'
-                }
-                assert key1 in scst_qla_targets
-                assert scst_qla_targets[key1] == {
-                    'LUN': {},
-                    'enabled': '0',
-                    'rel_tgt_id': '10001'
-                }
-                assert key2 in scst_qla_targets
-                assert scst_qla_targets[key2] == {
-                    'LUN': {'0': 'fcextent1'},
-                    'enabled': '1',
-                    'rel_tgt_id': str(5001 + rel_tgt_id_node_offset)
-                }
-
-            # NPIV target no longer mapped.  Now reduce npiv to zero
-            call('fc.fc_host.update', fc_hosts[0]['id'], {'npiv': 0})
-
-            # Make sure fcport.port_choices looks correct
-            assert call('fcport.port_choices') == {
-                'fc0': {
-                    'wwpn': str_to_wwpn_naa(NODE_A_0_WWPN),
-                    'wwpn_b': str_to_wwpn_b_naa(NODE_B_0_WWPN)
-                },
-                'fc1': {
-                    'wwpn': str_to_wwpn_naa(NODE_A_1_WWPN),
-                    'wwpn_b': str_to_wwpn_b_naa(NODE_B_1_WWPN)
-                }
-            }
diff --git a/tests/api2/test_filesystem__file_tail_follow.py b/tests/api2/test_filesystem__file_tail_follow.py
deleted file mode 100644
index a4508465d4402..0000000000000
--- a/tests/api2/test_filesystem__file_tail_follow.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import client, ssh
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_filesystem__file_tail_follow__grouping():
-    ssh("echo > /tmp/file_tail_follow.txt")
-
-    with client() as c:
-        received = []
-
-        def append(type, **kwargs):
-            received.append((time.monotonic(), kwargs["fields"]["data"]))
-
-        c.subscribe("filesystem.file_tail_follow:/tmp/file_tail_follow.txt", append)
-
-        ssh("for i in `seq 1 200`; do echo test >> /tmp/file_tail_follow.txt; sleep 0.01; done")
-
-        # Let things settle down
-        time.sleep(1)
-
-        received = received[1:]  # Initial file contents
-        # We were sending this for 2-3 seconds, so we should have received 4-6 blocks at a 0.5 sec interval
-        assert 4 <= len(received) <= 6, str(received)
-        # All blocks should have been received uniformly in time
-        assert all(0.4 <= b2[0] - b1[0] <= 1.0 for b1, b2 in zip(received[:-1], received[1:])), str(received)
-        # All blocks should contain more or less same amount of data
-        assert all(len(block[1].split("\n")) <= 60 for block in received[:-1]), str(received)
-
-        # One single send
-        ssh("echo finish >> /tmp/file_tail_follow.txt")
-
-        time.sleep(1)
-        assert received[-1][1] == "finish\n"
diff --git a/tests/api2/test_filesystem__put.py b/tests/api2/test_filesystem__put.py
deleted file mode 100644
index 5be034b9c7f01..0000000000000
--- a/tests/api2/test_filesystem__put.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import json
-import os
-import sys
-import tempfile
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import wait_on_job, POST
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
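-# Upload a local file to the target host through the /_upload/ endpoint as a
-# filesystem.put job, then wait for the job to complete and return its result.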
-def upload_file(file_path, file_path_on_tn):
-    data = {'method': 'filesystem.put', 'params': [file_path_on_tn]}
-    with open(file_path, 'rb') as f:
-        response = POST(
-            '/_upload/',
-            files={'data': json.dumps(data), 'file': f},
-            use_ip_only=True,
-            force_new_headers=True,
-        )
-
-    job_id = json.loads(response.text)['job_id']
-    return wait_on_job(job_id, 300)
-
-
-def file_exists(file_path):
-    return any(
-        entry for entry in call('filesystem.listdir', os.path.dirname(file_path))
-        if entry['name'] == os.path.basename(file_path) and entry['type'] == 'FILE'
-    )
-
-
-def test_put_file():
-    upload_file_impl(False)
-
-
-def test_put_file_in_locked_dataset():
-    upload_file_impl(True)
-
-
-def upload_file_impl(lock):
-    with tempfile.NamedTemporaryFile(mode='w') as f:
-        f.write('filesystem.put test')
-        f.flush()
-
-        with dataset(
-            'test_filesystem_put', data={
-                'encryption': True,
-                'inherit_encryption': False,
-                'encryption_options': {'passphrase': '12345678'}
-            },
-        ) as test_dataset:
-            if lock:
-                call('pool.dataset.lock', test_dataset, job=True)
-            file_path_on_tn = f'/mnt/{test_dataset}/testfile'
-            job_detail = upload_file(f.name, file_path_on_tn)
-            assert job_detail['results']['state'] == ('FAILED' if lock else 'SUCCESS')
-            assert file_exists(file_path_on_tn) is not lock
diff --git a/tests/api2/test_filesystem_setperm_strip_acl.py b/tests/api2/test_filesystem_setperm_strip_acl.py
deleted file mode 100644
index 294644b130c5c..0000000000000
--- a/tests/api2/test_filesystem_setperm_strip_acl.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import os
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-def test__strip_acl_setperm():
-    """ verify ACL can be stripped on single file by explicity specifying strip """
-    with dataset('stripacl_test', {'share_type': 'SMB'}) as ds:
-        mp = os.path.join('/mnt', ds)
-
-        dir_path = os.path.join(mp, 'thedir')
-        assert call('filesystem.stat', mp)['acl']
-
-        call('filesystem.mkdir', {'path': dir_path, 'options': {'raise_chmod_error': False}})
-        assert call('filesystem.stat', dir_path)['acl']
-
-        # nonrecursive
-        call('filesystem.setperm', {'path': mp, 'options': {'stripacl': True}}, job=True)
-
-        # target for setperm should not have ACL anymore
-        assert not call('filesystem.stat', mp)['acl']
-
-        # but directory should
-        assert call('filesystem.stat', dir_path)['acl']
-
-        # recursive
-        call('filesystem.setperm', {'path': mp, 'options': {'stripacl': True, 'recursive': True}}, job=True)
-        assert not call('filesystem.stat', dir_path)['acl']
diff --git a/tests/api2/test_ftp_crud_roles.py b/tests/api2/test_ftp_crud_roles.py
deleted file mode 100644
index 5e92a1a8d9cb9..0000000000000
--- a/tests/api2/test_ftp_crud_roles.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_FTP_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "ftp.config", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "ftp.connection_count", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_FTP_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "ftp.update", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_FTP_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "ftp.update", role, True)
-    common_checks(
-        unprivileged_user_fixture, "service.start", role, True, method_args=["ftp"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.restart", role, True, method_args=["ftp"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.reload", role, True, method_args=["ftp"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.stop", role, True, method_args=["ftp"], valid_role_exception=False
-    )
diff --git a/tests/api2/test_group_utils.py b/tests/api2/test_group_utils.py
deleted file mode 100644
index 4c52902d3b72c..0000000000000
--- a/tests/api2/test_group_utils.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.account import group, user
-
-
-def test_get_password_enabled_users():
-    with group({"name": "group1"}) as g1:
-        with group({"name": "group2"}) as g2:
-            with user({
-                "username": "test",
-                "full_name": "Test",
-                "group_create": True,
-                "groups": [g1["id"], g2["id"]],
-                "password": "test1234",
-            }) as u:
-                result = call("group.get_password_enabled_users", [g1["gid"], g2["gid"]], [])
-                assert len(result) == 1
-                assert result[0]["id"] == u["id"]
diff --git a/tests/api2/test_groupmap_migrate_share.py b/tests/api2/test_groupmap_migrate_share.py
deleted file mode 100644
index 5b9e916d239e1..0000000000000
--- a/tests/api2/test_groupmap_migrate_share.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import os
-import pytest
-import json
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, ssh
-
-SMB_NAME = 'groupmap_migrate'
-RO_ADMINS = 'truenas_readonly_administrators'
-
-
-@pytest.fixture(scope='module')
-def do_setup():
-    with dataset('groupmap-migrate', data={'share_type': 'SMB'}) as ds:
-        with smb_share(os.path.join('/mnt', ds), SMB_NAME) as s:
-            ro = call('group.query', [['group', '=', RO_ADMINS]], {'get': True})
-            acl = call('sharing.smb.setacl', {
-                'share_name': SMB_NAME,
-                'share_acl': [{
-                    'ae_who_id': {'id_type': 'GROUP', 'id': ro['gid']},
-                    'ae_perm': 'READ',
-                    'ae_type': 'ALLOWED'
-                }]
-            })
-            yield {'dataset': ds, 'share': s, 'acl': acl, 'group': ro}
-
-
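-# Re-create the RO_ADMINS groupmap so the group gets a different, auto-allocated SID,
-# point the share ACL at that SID, then verify that dumping the groupmap via
-# smb.groupmap_list migrates the ACL back to the group's canonical SID.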
-def test_groupmap_migrate(do_setup):
-    assert do_setup['acl']['share_name'] == SMB_NAME
-    assert do_setup['acl']['share_acl'][0]['ae_perm'] == 'READ'
-    assert do_setup['acl']['share_acl'][0]['ae_who_sid'] == do_setup['group']['sid']
-
-    # first delete existing groupmap
-    ssh(f'net groupmap delete ntgroup={RO_ADMINS}')
-
-    # Adding it back will force auto-allocation from low RID range
-    ssh(f'net groupmap add ntgroup={RO_ADMINS} unixgroup={RO_ADMINS}')
-
-    groupmap = json.loads(ssh('net groupmap list --json'))
-    sid = None
-    for entry in groupmap['groupmap']:
-        if entry['gid'] != do_setup['group']['gid']:
-            continue
-
-        sid = entry['sid']
-
-    # Make sure we have an actually different sid in the groupmap
-    assert sid != do_setup['group']['sid']
-
-    # first update ACL to have mapping to new sid
-    call('smb.sharesec.setacl', {'share_name': SMB_NAME, 'share_acl': [{
-        'ae_who_sid': sid,
-        'ae_perm': 'READ',
-        'ae_type': 'ALLOWED'
-    }]})
-
-    # make sure it's actually set
-    new_acl = call('smb.sharesec.getacl', SMB_NAME)
-    assert new_acl['share_acl'][0]['ae_who_sid'] == sid
-
-    # Dumping the groupmap detects the inconsistency and auto-migrates the share ACL at that time
-    call('smb.groupmap_list')
-
-    new_acl = call('smb.sharesec.getacl', SMB_NAME)
-    assert new_acl['share_acl'][0]['ae_who_sid'] == do_setup['group']['sid']
diff --git a/tests/api2/test_idmap.py b/tests/api2/test_idmap.py
deleted file mode 100644
index df9725d610abb..0000000000000
--- a/tests/api2/test_idmap.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call
-
-try:
-    from config import (
-        LDAPBASEDN,
-        LDAPBINDDN,
-        LDAPBINDPASSWORD,
-        LDAPHOSTNAME,
-    )
-except ImportError:
-    Reason = 'LDAP* variables are not set up in config.py'
-    # comment out pytestmark for development testing with --dev-test
-    pytestmark = pytest.mark.skipif(True, reason=Reason)
-
-
-def test_create_and_delete_idmap_certificate():
-    payload = {
-        'name': 'BOB.NB',
-        'range_low': 1000,
-        'range_high': 2000,
-        'certificate': 1,
-        'idmap_backend': 'RFC2307',
-        'options': {
-            'ldap_server': 'STANDALONE',
-            'bind_path_user': LDAPBASEDN,
-            'bind_path_group': LDAPBASEDN,
-            'ldap_url': LDAPHOSTNAME,
-            'ldap_user_dn': LDAPBINDDN,
-            'ldap_user_dn_password': LDAPBINDPASSWORD,
-            'ssl': 'ON',
-            'ldap_realm': False,
-        }
-    }
-    idmap_id = call('idmap.create', payload)['id']
-
-    call('idmap.delete', idmap_id)
-    assert call('idmap.query', [['id', '=', idmap_id]]) == []
diff --git a/tests/api2/test_initshutdownscript.py b/tests/api2/test_initshutdownscript.py
deleted file mode 100644
index 91cf4af44c259..0000000000000
--- a/tests/api2/test_initshutdownscript.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import base64
-import contextlib
-import errno
-import stat
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import client, ssh
-from middlewared.service_exception import ValidationErrors, ValidationError
-
-TEST_SCRIPT_FILE = '/root/.TEST_SCRIPT_FILE'
-_775 = stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IXOTH
-
-
-@pytest.fixture(scope='module')
-def ws_client():
-    with client() as c:
-        yield c
-
-
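-# Upload the script contents to TEST_SCRIPT_FILE via filesystem.file_receive and
-# register it as a PREINIT init/shutdown script; the entry is deleted on exit.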
-@contextlib.contextmanager
-def initshutdown_script(ws_client, contents, extra=None):
-    extra = extra or {}
-
-    ws_client.call(
-        'filesystem.file_receive',
-        TEST_SCRIPT_FILE,
-        base64.b64encode(contents.encode('utf-8')).decode(),
-        {'mode': _775},
-    )
-    script = ws_client.call(
-        'initshutdownscript.create',
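-# Create an SMB share on a passphrase-encrypted dataset, stop the service, then lock
-# and unlock the dataset: the share must report locked while the dataset is locked,
-# and the (auto-start enabled) service must be running again after the unlock.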
-        {
-            'type': 'SCRIPT',
-            'script': TEST_SCRIPT_FILE,
-            'when': 'PREINIT',
-            **extra,
-        }
-    )
-    try:
-        yield script
-    finally:
-        ws_client.call('initshutdownscript.delete', script['id'])
-
-
-def test_initshutdown_script(ws_client):
-    with initshutdown_script(ws_client, 'echo "testing"') as script:
-        _id = script['id']
-        filters = [['id', '=', _id]]
-        opts = {'get': True}
-
-        # verify
-        assert ws_client.call('initshutdownscript.query', filters, opts)['script'] == TEST_SCRIPT_FILE
-
-        # add a comment
-        ws_client.call('initshutdownscript.update', _id, {'comment': 'test_comment'})
-        assert ws_client.call('initshutdownscript.query', filters, opts)['comment'] == 'test_comment'
-
-        # disable it
-        ws_client.call('initshutdownscript.update', _id, {'enabled': False})
-        assert ws_client.call('initshutdownscript.query', filters, opts)['enabled'] is False
-
-    assert not ws_client.call('initshutdownscript.query', filters)
-
-
-def test_initshutdown_script_bad(ws_client):
-    bad_script = '/root/nonexistent-script'
-    with pytest.raises(ValidationErrors) as e:
-        ws_client.call(
-            'initshutdownscript.create',
-            {
-                'type': 'SCRIPT',
-                'script': bad_script,
-                'when': 'PREINIT',
-            }
-        )
-
-    assert e.value.errors == [
-        ValidationError(
-            'init_shutdown_script_create.script',
-            f'Path {bad_script} not found',
-            errno.ENOENT
-        )
-    ]
-
-
-def test_initshutdownscript_success(ws_client):
-    ssh("rm /tmp/flag", check=False)
-
-    with initshutdown_script(ws_client, 'echo ok > /tmp/flag'):
-        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)
-
-    assert ssh("cat /tmp/flag") == "ok\n"
-
-
-def test_initshutdownscript_timeout(ws_client):
-    ssh("rm /tmp/flag", check=False)
-
-    with initshutdown_script(ws_client, 'sleep 10', {"timeout": 2}):
-        start = time.monotonic()
-        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)
-
-        assert time.monotonic() - start < 5
-
-    assert f"Timed out running SCRIPT: {TEST_SCRIPT_FILE!r}" in ssh("cat /var/log/middlewared.log")
-
-
-def test_initshutdownscript_failure(ws_client):
-    ssh("rm /tmp/flag", check=False)
-
-    with initshutdown_script(ws_client, 'echo everything went wrong > /dev/stderr; exit 1'):
-        ws_client.call('initshutdownscript.execute_init_tasks', 'PREINIT', job=True)
-
-    assert (
-        f"Failed to execute 'exec {TEST_SCRIPT_FILE}' with error 'everything went wrong\\n'" in
-        ssh("cat /var/log/middlewared.log")
-    )
diff --git a/tests/api2/test_ipa_join.py b/tests/api2/test_ipa_join.py
deleted file mode 100644
index 44026cf75eeb6..0000000000000
--- a/tests/api2/test_ipa_join.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.directory_service import ipa, FREEIPA_ADMIN_BINDPW
-from middlewared.test.integration.assets.product import product_type
-from middlewared.test.integration.utils import call, client
-from middlewared.test.integration.utils.client import truenas_server
-
-
-@pytest.fixture(scope="module")
-def do_freeipa_connection():
-    with ipa() as config:
-        yield config
-
-
-@pytest.fixture(scope="function")
-def override_product():
-    if truenas_server.server_type == 'ENTERPRISE_HA':
-        yield
-    else:
-        with product_type():
-            yield
-
-
-@pytest.fixture(scope="function")
-def enable_ds_auth(override_product):
-    sys_config = call('system.general.update', {'ds_auth': True})
-    try:
-        yield sys_config
-    finally:
-        call('system.general.update', {'ds_auth': False})
-
-
-def test_setup_and_enabling_freeipa(do_freeipa_connection):
-    config = do_freeipa_connection
-
-    ds = call('directoryservices.status')
-    assert ds['type'] == 'IPA'
-    assert ds['status'] == 'HEALTHY'
-
-    alerts = [alert['klass'] for alert in call('alert.list')]
-
-    # There's a one-shot alert that gets fired if we are joined to an IPA domain
-    # via the legacy mechanism.
-    assert 'IPALegacyConfiguration' not in alerts
-
-    assert config['kerberos_realm'], str(config)
-    assert config['kerberos_principal'], str(config)
-
-    # our kerberos principal should be the host one (not SMB or NFS)
-    assert config['kerberos_principal'].startswith('host/')
-
-
-def test_accounts_cache(do_freeipa_connection):
-    ipa_users_cnt = call('user.query', [['local', '=', False]], {'count': True})
-    assert ipa_users_cnt != 0
-
-    ipa_groups_cnt = call('group.query', [['local', '=', False]], {'count': True})
-    assert ipa_groups_cnt != 0
-
-
-@pytest.mark.parametrize('keytab_name', [
-    'IPA_MACHINE_ACCOUNT',
-    'IPA_NFS_KEYTAB',
-    'IPA_SMB_KEYTAB'
-])
-def test_keytabs_exist(do_freeipa_connection, keytab_name):
-    call('kerberos.keytab.query', [['name', '=', keytab_name]], {'get': True})
-
-
-def test_check_kerberos_ticket(do_freeipa_connection):
-    tkt = call('kerberos.check_ticket')
-
-    assert tkt['name_type'] == 'KERBEROS_PRINCIPAL'
-    assert tkt['name'].startswith(do_freeipa_connection['kerberos_principal'])
-
-
-def test_certificate(do_freeipa_connection):
-    call('certificateauthority.query', [['name', '=', 'IPA_DOMAIN_CACERT']], {'get': True})
-
-
-def test_system_keytab_has_nfs_principal(do_freeipa_connection):
-    assert call('kerberos.keytab.has_nfs_principal')
-
-
-def test_smb_keytab_exists(do_freeipa_connection):
-    call('filesystem.stat', '/etc/ipa/smb.keytab')
-
-
-def test_admin_privilege(do_freeipa_connection, enable_ds_auth):
-    ipa_config = call('ldap.ipa_config')
-
-    priv_names = [priv['name'] for priv in call('privilege.query')]
-    assert ipa_config['domain'].upper() in priv_names
-
-    priv = call('privilege.query', [['name', '=', ipa_config['domain'].upper()]], {'get': True})
-    admins_grp = call('group.get_group_obj', {'groupname': 'admins', 'sid_info': True})
-
-    assert len(priv['ds_groups']) == 1
-    assert priv['ds_groups'][0]['gid'] == admins_grp['gr_gid']
-    assert priv['ds_groups'][0]['sid'] == admins_grp['sid']
-
-    assert priv['roles'] == ['FULL_ADMIN']
-
-    with client(auth=('ipaadmin', FREEIPA_ADMIN_BINDPW)) as c:
-        me = c.call('auth.me')
-
-        assert 'DIRECTORY_SERVICE' in me['account_attributes']
-        assert 'LDAP' in me['account_attributes']
-        assert me['privilege']['roles'] == set(priv['roles'])
-
-
-def test_dns_resolution(do_freeipa_connection):
-    ipa_config = do_freeipa_connection['ipa_config']
-
-    addresses = call('dnsclient.forward_lookup', {'names': [ipa_config['host']]})
-    assert len(addresses) != 0
diff --git a/tests/api2/test_ipa_leave.py b/tests/api2/test_ipa_leave.py
deleted file mode 100644
index b90fe7bd2de9e..0000000000000
--- a/tests/api2/test_ipa_leave.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import errno
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.directory_service import ipa
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def ipa_config():
-    """ join then leave IPA domain so that we can evaluate server after leaving the IPA domain """
-    with ipa() as config:
-        ipa_config = config['ipa_config']
-
-    yield ipa_config
-
-
-def test_cache_cleared(ipa_config):
-    ipa_users_cnt = call('user.query', [['local', '=', False]], {'count': True})
-    assert ipa_users_cnt == 0
-
-    ipa_groups_cnt = call('group.query', [['local', '=', False]], {'count': True})
-    assert ipa_groups_cnt == 0
-
-
-@pytest.mark.parametrize('keytab_name', [
-    'IPA_MACHINE_ACCOUNT',
-    'IPA_NFS_KEYTAB',
-    'IPA_SMB_KEYTAB'
-])
-def test_keytabs_deleted(ipa_config, keytab_name):
-    kt = call('kerberos.keytab.query', [['name', '=', keytab_name]])
-    assert len(kt) == 0
-
-
-def test_check_no_kerberos_ticket(ipa_config):
-    with pytest.raises(CallError) as ce:
-        call('kerberos.check_ticket')
-
-    assert ce.value.errno == errno.ENOKEY
-
-
-def test_check_no_kerberos_realm(ipa_config):
-    realms = call('kerberos.realm.query')
-    assert len(realms) == 0, str(realms)
-
-
-def test_system_keytab_has_no_nfs_principal(ipa_config):
-    assert not call('kerberos.keytab.has_nfs_principal')
-
-
-def test_smb_keytab_does_not_exist(ipa_config):
-    with pytest.raises(CallError) as ce:
-        call('filesystem.stat', '/etc/ipa/smb.keytab')
-
-    assert ce.value.errno == errno.ENOENT
-
-
-def test_no_admin_privilege(ipa_config):
-    priv = call('privilege.query', [['name', '=', ipa_config['domain'].upper()]])
-    assert priv == []
-
-
-def test_no_certificate(ipa_config):
-    certs = call('certificateauthority.query', [['name', '=', 'IPA_DOMAIN_CACERT']])
-    assert len(certs) == 0, str(certs)
-
-
-def test_no_dns_resolution(ipa_config):
-    try:
-        results = call('dnsclient.forward_lookup', {'names': [ipa_config['host']]})
-        assert len(results) == 0
-    except Exception:
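-        # A lookup failure also counts as no resolution here.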
-        pass
diff --git a/tests/api2/test_iscsi.py b/tests/api2/test_iscsi.py
deleted file mode 100644
index d4e9b7f9faabc..0000000000000
--- a/tests/api2/test_iscsi.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.iscsi import iscsi_extent
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-def test__iscsi_extent__disk_choices(request):
-    with dataset("test zvol", {"type": "VOLUME", "volsize": 1048576}) as ds:
-        # Make snapshots available for devices
-        call("zfs.dataset.update", ds, {"properties": {"snapdev": {"parsed": "visible"}}})
-        call("zfs.snapshot.create", {"dataset": ds, "name": "snap-1"})
-        assert call("iscsi.extent.disk_choices") == {
-            f'zvol/{ds.replace(" ", "+")}': f'{ds} (1 MiB)',
-            f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]',
-        }
-
-        # Create new extent
-        with iscsi_extent({
-            "name": "test_extent",
-            "type": "DISK",
-            "disk": f"zvol/{ds.replace(' ', '+')}",
-        }):
-            # Verify that zvol is not available in iscsi disk choices
-            assert call("iscsi.extent.disk_choices") == {
-                f'zvol/{ds.replace(" ", "+")}@snap-1': f'{ds}@snap-1 [ro]',
-            }
-            # Verify that zvol is not availabe in VM disk choices
-            # Verify that zvol is not available in VM disk choices
-            # (and the snapshot zvol is not available either, as it is read-only)
-
-
-def test__iscsi_extent__create_with_invalid_disk_with_whitespace(request):
-    with dataset("test zvol", {
-        "type": "VOLUME",
-        "volsize": 1048576,
-    }) as ds:
-        with pytest.raises(ValidationErrors) as e:
-            with iscsi_extent({
-                "name": "test_extent",
-                "type": "DISK",
-                "disk": f"zvol/{ds}",
-            }):
-                pass
-
-        assert str(e.value) == (
-            f"[EINVAL] iscsi_extent_create.disk: Device '/dev/zvol/{ds}' for volume '{ds}' does not exist\n"
-        )
-
-
-def test__iscsi_extent__locked(request):
-    with dataset("test zvol", {
-        "type": "VOLUME",
-        "volsize": 1048576,
-        "inherit_encryption": False,
-        "encryption": True,
-        "encryption_options": {"passphrase": "testtest"},
-    }) as ds:
-        with iscsi_extent({
-            "name": "test_extent",
-            "type": "DISK",
-            "disk": f"zvol/{ds.replace(' ', '+')}",
-        }) as extent:
-            assert not extent["locked"]
-
-            call("pool.dataset.lock", ds, job=True)
-
-            extent = call("iscsi.extent.get_instance", extent["id"])
-            assert extent["locked"]
diff --git a/tests/api2/test_iscsi_auth_crud_roles.py b/tests/api2/test_iscsi_auth_crud_roles.py
deleted file mode 100644
index ed29e4f869132..0000000000000
--- a/tests/api2/test_iscsi_auth_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_AUTH_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.auth.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_AUTH_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.auth.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.auth.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.auth.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_AUTH_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.auth.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.auth.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.auth.delete", role, True)
diff --git a/tests/api2/test_iscsi_auth_network.py b/tests/api2/test_iscsi_auth_network.py
deleted file mode 100644
index 79df689d0a207..0000000000000
--- a/tests/api2/test_iscsi_auth_network.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import contextlib
-import ipaddress
-import socket
-
-import pytest
-
-from middlewared.test.integration.assets.iscsi import target_login_test
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-
-@pytest.fixture(scope="module")
-def my_ip4():
-    """See which of my IP addresses will be used to connect."""
-    # Things can be complicated, e.g. NAT between the test runner and the
-    # target system.  Therefore, first ssh into the remote system and see
-    # what it thinks our IP address is.
-    try:
-        myip = ipaddress.ip_address(ssh('echo $SSH_CLIENT').split()[0])
-        if myip.version != 4:
-            raise ValueError("Not a valid IPv4 address")
-        return str(myip)
-    except Exception:
-        # Fall back
-        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-        sock.settimeout(2)
-        result = sock.connect_ex((truenas_server.ip, 80))
-        assert result == 0
-        myip = sock.getsockname()[0]
-        sock.close()
-        # Check that we have an IPv4 address
-        socket.inet_pton(socket.AF_INET, myip)
-        return myip
-
-
-@contextlib.contextmanager
-def portal():
-    portal_config = call('iscsi.portal.create', {'listen': [{'ip': truenas_server.ip}], 'discovery_authmethod': 'NONE'})
-    try:
-        yield portal_config
-    finally:
-        call('iscsi.portal.delete', portal_config['id'])
-
-
-@contextlib.contextmanager
-def initiator():
-    initiator_config = call('iscsi.initiator.create', {})
-    try:
-        yield initiator_config
-    finally:
-        # Very likely already cleaned up (by removing the only target using it)
-        if call('iscsi.initiator.query', [['id', '=', initiator_config['id']]]):
-            call('iscsi.initiator.delete', initiator_config['id'])
-
-
-@contextlib.contextmanager
-def target(target_name, groups):
-    target_config = call('iscsi.target.create', {'name': target_name, 'groups': groups})
-    try:
-        yield target_config
-    finally:
-        call('iscsi.target.delete', target_config['id'])
-
-
-@contextlib.contextmanager
-def extent(extent_name, zvol_name=None):
-    zvol_name = zvol_name or extent_name
-    with dataset(zvol_name, {'type': 'VOLUME', 'volsize': 51200, 'volblocksize': '512', 'sparse': True}) as zvol:
-        extent_config = call('iscsi.extent.create', {'name': extent_name, 'disk': f'zvol/{zvol}'})
-        try:
-            yield extent_config
-        finally:
-            call('iscsi.extent.delete', extent_config['id'])
-
-
-@contextlib.contextmanager
-def target_extent(target_id, extent_id, lun_id):
-    target_extent_config = call(
-        'iscsi.targetextent.create', {'target': target_id, 'extent': extent_id, 'lunid': lun_id}
-    )
-    try:
-        yield target_extent_config
-    finally:
-        call('iscsi.targetextent.delete', target_extent_config['id'])
-
-
-@contextlib.contextmanager
-def configured_target_to_extent():
-    with portal() as portal_config:
-        with initiator() as initiator_config:
-            with target(
-                'test-target', groups=[{
-                    'portal': portal_config['id'],
-                    'initiator': initiator_config['id'],
-                    'auth': None,
-                    'authmethod': 'NONE'
-                }]
-            ) as target_config:
-                with extent('test_extent') as extent_config:
-                    with target_extent(target_config['id'], extent_config['id'], 1):
-                        yield {
-                            'extent': extent_config,
-                            'target': target_config,
-                            'global': call('iscsi.global.config'),
-                            'portal': portal_config,
-                        }
-
-
-@contextlib.contextmanager
-def configure_iscsi_service():
-    with configured_target_to_extent() as iscsi_config:
-        try:
-            call('service.start', 'iscsitarget')
-            assert call('service.started', 'iscsitarget') is True
-            yield iscsi_config
-        finally:
-            call('service.stop', 'iscsitarget')
-
-
-@pytest.mark.parametrize('valid', [True, False])
-def test_iscsi_auth_networks(valid):
-    with configure_iscsi_service() as config:
-        call(
-            'iscsi.target.update',
-            config['target']['id'],
-            {'auth_networks': [] if valid else ['8.8.8.8/32']}
-        )
-        portal_listen_details = config['portal']['listen'][0]
-        assert target_login_test(
-            f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}',
-            f'{config["global"]["basename"]}:{config["target"]["name"]}',
-        ) is valid
-
-
-@pytest.mark.parametrize('valid', [True, False])
-def test_iscsi_auth_networks_exact_ip(my_ip4, valid):
-    with configure_iscsi_service() as config:
-        call(
-            'iscsi.target.update',
-            config['target']['id'],
-            {'auth_networks': [f"{my_ip4}/32"] if valid else ['8.8.8.8/32']}
-        )
-        portal_listen_details = config['portal']['listen'][0]
-        assert target_login_test(
-            f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}',
-            f'{config["global"]["basename"]}:{config["target"]["name"]}',
-        ) is valid
-
-
-@pytest.mark.parametrize('valid', [True, False])
-def test_iscsi_auth_networks_netmask_24(my_ip4, valid):
-    # good_ip will be our IP with the last byte cleared.
-    good_ip = '.'.join(my_ip4.split('.')[:-1] + ['0'])
-    # bad_ip will be our IP with the second-to-last byte changed and the last byte cleared.
-    n = (int(my_ip4.split('.')[2]) + 1) % 256
-    bad_ip = '.'.join(good_ip.split('.')[:2] + [str(n), '0'])
-    with configure_iscsi_service() as config:
-        call(
-            'iscsi.target.update',
-            config['target']['id'],
-            {'auth_networks': ["8.8.8.8/24", f"{good_ip}/24"] if valid else ["8.8.8.8/24", f"{bad_ip}/24"]}
-        )
-        portal_listen_details = config['portal']['listen'][0]
-        assert target_login_test(
-            f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}',
-            f'{config["global"]["basename"]}:{config["target"]["name"]}',
-        ) is valid
-
-
-@pytest.mark.parametrize('valid', [True, False])
-def test_iscsi_auth_networks_netmask_16(my_ip4, valid):
-    # good_ip will be our IP with the second-to-last byte changed and the last byte cleared.
-    n = (int(my_ip4.split('.')[2]) + 1) % 256
-    good_ip = '.'.join(my_ip4.split('.')[:2] + [str(n), '0'])
-    # bad_ip will be good_ip with the second byte changed.
-    ip_list = good_ip.split('.')
-    n = (int(ip_list[1]) + 1) % 256
-    bad_ip = '.'.join([ip_list[0], str(n)] + ip_list[-2:])
-    with configure_iscsi_service() as config:
-        call(
-            'iscsi.target.update',
-            config['target']['id'],
-            {'auth_networks': ["8.8.8.8/16", f"{good_ip}/16"] if valid else ["8.8.8.8/16", f"{bad_ip}/16"]}
-        )
-        portal_listen_details = config['portal']['listen'][0]
-        assert target_login_test(
-            f'{portal_listen_details["ip"]}:{portal_listen_details["port"]}',
-            f'{config["global"]["basename"]}:{config["target"]["name"]}',
-        ) is valid
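
The two netmask tests above build their "good" and "bad" addresses by plain octet string manipulation; the intent is only to land inside or outside the configured auth network. A minimal standalone sketch of that membership check using the standard library (the helper name ip_allowed is hypothetical and not part of the deleted test suite):

import ipaddress

def ip_allowed(client_ip, auth_networks):
    # True if client_ip falls inside any of the configured auth networks.
    addr = ipaddress.ip_address(client_ip)
    return any(addr in ipaddress.ip_network(net, strict=False) for net in auth_networks)

# With a /24 network only the first three octets have to match.
assert ip_allowed('192.0.2.57', ['8.8.8.0/24', '192.0.2.0/24'])
assert not ip_allowed('192.0.2.57', ['8.8.8.0/24', '192.0.3.0/24'])
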
diff --git a/tests/api2/test_iscsi_extent_crud_roles.py b/tests/api2/test_iscsi_extent_crud_roles.py
deleted file mode 100644
index eae622008b59c..0000000000000
--- a/tests/api2/test_iscsi_extent_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.extent.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_EXTENT_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.extent.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_EXTENT_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.extent.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.extent.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.extent.delete", role, True)
diff --git a/tests/api2/test_iscsi_global_crud_roles.py b/tests/api2/test_iscsi_global_crud_roles.py
deleted file mode 100644
index 971ead1d14380..0000000000000
--- a/tests/api2/test_iscsi_global_crud_roles.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_GLOBAL_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.global.config", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "iscsi.global.sessions", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "iscsi.global.client_count", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "iscsi.global.alua_enabled", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_GLOBAL_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.global.update", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_GLOBAL_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.global.update", role, True)
diff --git a/tests/api2/test_iscsi_host_crud_roles.py b/tests/api2/test_iscsi_host_crud_roles.py
deleted file mode 100644
index 8f8615df05cf6..0000000000000
--- a/tests/api2/test_iscsi_host_crud_roles.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.host.query", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "iscsi.host.get_initiators", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.host.get_targets", role, True)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_HOST_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.host.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.host.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.host.delete", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.host.set_initiators", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_HOST_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.host.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.host.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.host.delete", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.host.set_initiators", role, True)
diff --git a/tests/api2/test_iscsi_initiator_crud_roles.py b/tests/api2/test_iscsi_initiator_crud_roles.py
deleted file mode 100644
index f12ae6d9c8d70..0000000000000
--- a/tests/api2/test_iscsi_initiator_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_INITIATOR_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_INITIATOR_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.initiator.delete", role, True)
diff --git a/tests/api2/test_iscsi_portal_crud_roles.py b/tests/api2/test_iscsi_portal_crud_roles.py
deleted file mode 100644
index 882a483062dc5..0000000000000
--- a/tests/api2/test_iscsi_portal_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.portal.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_PORTAL_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_PORTAL_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.portal.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.portal.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.portal.delete", role, True)
diff --git a/tests/api2/test_iscsi_target_crud_roles.py b/tests/api2/test_iscsi_target_crud_roles.py
deleted file mode 100644
index 1991c2a3871e3..0000000000000
--- a/tests/api2/test_iscsi_target_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGET_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.target.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGET_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.target.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.target.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.target.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_TARGET_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.target.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.target.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.target.delete", role, True)
diff --git a/tests/api2/test_iscsi_targetextent_crud_roles.py b/tests/api2/test_iscsi_targetextent_crud_roles.py
deleted file mode 100644
index 107c880c91890..0000000000000
--- a/tests/api2/test_iscsi_targetextent_crud_roles.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGETEXTENT_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_ISCSI_READ", "SHARING_ISCSI_TARGETEXTENT_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.create", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.update", role, False)
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.delete", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_ISCSI_WRITE", "SHARING_ISCSI_TARGETEXTENT_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.create", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.update", role, True)
-    common_checks(unprivileged_user_fixture, "iscsi.targetextent.delete", role, True)
diff --git a/tests/api2/test_job_credentials.py b/tests/api2/test_job_credentials.py
deleted file mode 100644
index 85e1dbc135459..0000000000000
--- a/tests/api2/test_job_credentials.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, mock
-from unittest.mock import ANY
-
-
-def test_job_credentials():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-            job_id = c.call("test.test1")
-
-            job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-
-            assert job["credentials"] == {"type": "LOGIN_PASSWORD", "data": {"username": c.username, "login_at": ANY}}
diff --git a/tests/api2/test_job_errno.py b/tests/api2/test_job_errno.py
deleted file mode 100644
index 766dd9dc64e1d..0000000000000
--- a/tests/api2/test_job_errno.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, mock
-from truenas_api_client import ClientException
-
-
-def test_job_errno():
-
-    with mock("test.test1", """
-        from middlewared.service import job
-        from middlewared.schema import returns, Password
-        from middlewared.service_exception import CallError
-
-        @job()
-        @returns(Password("my_password"))
-        def mock(self, job, *args):
-            raise CallError("canary", 13)
-    """):
-        job_id = call("test.test1")
-
-        with pytest.raises(ClientException):
-            call("core.job_wait", job_id, job=True)
-
-        result = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-
-        assert "errno" in result["exc_info"]
-        assert result["exc_info"]["errno"] == 13
diff --git a/tests/api2/test_job_events.py b/tests/api2/test_job_events.py
deleted file mode 100644
index 726de846543aa..0000000000000
--- a/tests/api2/test_job_events.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pprint
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, client, mock
-
-
-def test_successful_job_events():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with client() as c:
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("core.get_jobs", callback, sync=True)
-            c.call("test.test1", job=True)
-
-            # FIXME: Sometimes an identical message for the `SUCCESS` state is sent (or received) twice.
-            # We have not been able to determine why; since it does not break anything, we are not
-            # investigating further.
-            if len(events) == 4 and events[2] == events[3]:
-                events = events[:3]
-
-            assert len(events) == 3, pprint.pformat(events, indent=2)
-            assert events[0][0] == "ADDED"
-            assert events[0][1]["fields"]["state"] == "WAITING"
-            assert events[1][0] == "CHANGED"
-            assert events[1][1]["fields"]["state"] == "RUNNING"
-            assert events[2][0] == "CHANGED"
-            assert events[2][1]["fields"]["state"] == "SUCCESS"
-            assert events[2][1]["fields"]["result"] == 42
-
-
-def test_unprivileged_user_only_sees_its_own_jobs_events():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            return 42
-    """):
-        with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("core.get_jobs", callback, sync=True)
-
-            call("test.test1", "secret", job=True)
-            c.call("test.test1", "not secret", job=True)
-
-            assert all(event[1]["fields"]["arguments"] == ["not secret"]
-                       for event in events), pprint.pformat(events, indent=2)
diff --git a/tests/api2/test_job_lock.py b/tests/api2/test_job_lock.py
deleted file mode 100644
index 9e476dfeb8a8a..0000000000000
--- a/tests/api2/test_job_lock.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import contextlib
-import errno
-import os
-import time
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, mock, ssh
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_jobs_execute_in_parallel():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job()
-        def mock(self, job, *args):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1")
-        j2 = call("test.test1")
-        j3 = call("test.test1")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert time.monotonic() - start < 6
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_jobs_execute_sequentially_when_there_is_a_lock():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job(lock="test")
-        def mock(self, job, *args):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1")
-        j2 = call("test.test1")
-        j3 = call("test.test1")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert time.monotonic() - start >= 15
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_with_argument():
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job(lock=lambda args: f"test.{args[0]}")
-        def mock(self, job, s):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        j1 = call("test.test1", "a")
-        j2 = call("test.test1", "b")
-        j3 = call("test.test1", "a")
-
-        call("core.job_wait", j1, job=True)
-        call("core.job_wait", j2, job=True)
-        call("core.job_wait", j3, job=True)
-
-        assert 10 <= time.monotonic() - start < 15
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_queue_size():
-    try:
-        with mock("test.test1", """
-            from middlewared.service import job
-            
-            @job(lock="test", lock_queue_size=1)
-            def mock(self, job, *args):
-                with open("/tmp/test", "a") as f:
-                    f.write("a\\n")
-            
-                import time
-                time.sleep(5)
-        """):
-            j1 = call("test.test1")
-            j2 = call("test.test1")
-            j3 = call("test.test1")
-            j4 = call("test.test1")
-
-            call("core.job_wait", j1, job=True)
-            call("core.job_wait", j2, job=True)
-            call("core.job_wait", j3, job=True)
-            call("core.job_wait", j4, job=True)
-
-            assert ssh("cat /tmp/test") == "a\na\n"
-
-            assert j3 == j2
-            assert j4 == j2
-    finally:
-        with contextlib.suppress(FileNotFoundError):
-            os.unlink("/tmp/test")
-
-
-def test_call_sync_a_job_with_lock():
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        def mock(self):
-            return self.middleware.call_sync("test.test2").wait_sync()
-    """):
-        with mock("test.test2", """
-            from middlewared.service import job
-
-            @job(lock="test")
-            def mock(self, job, *args):
-                return 42
-        """):
-            assert call("test.test1") == 42
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_queue_unprivileged_user_can_access_own_jobs():
-    with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-        with mock("test.test1", """
-            from middlewared.service import job
-
-            @job(lock="test", lock_queue_size=1)
-            def mock(self, job, *args):
-                import time
-                time.sleep(5)
-        """):
-            j1 = c.call("test.test1")
-            j2 = c.call("test.test1")
-            j3 = c.call("test.test1")
-            assert j3 == j2
-
-            call("core.job_wait", j1, job=True)
-            call("core.job_wait", j2, job=True)
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_lock_queue_unprivileged_user_cant_access_others_jobs():
-    with unprivileged_user_client(allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-        with mock("test.test1", """
-            from middlewared.service import job
-
-            @job(lock="test", lock_queue_size=1)
-            def mock(self, job, *args):
-                import time
-                time.sleep(5)
-        """):
-            j1 = call("test.test1")
-            j2 = call("test.test1")
-            try:
-                with pytest.raises(CallError) as ve:
-                    c.call("test.test1")
-
-                assert ve.value.errno == errno.EBUSY
-            finally:
-                call("core.job_wait", j1, job=True)
-                call("core.job_wait", j2, job=True)
diff --git a/tests/api2/test_job_logs.py b/tests/api2/test_job_logs.py
deleted file mode 100644
index 044d2e0dbe119..0000000000000
--- a/tests/api2/test_job_logs.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import errno
-
-import pytest
-import requests
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, mock, url
-
-
-@pytest.fixture(scope="module")
-def c():
-    with unprivileged_user_client(roles=["REPLICATION_TASK_READ"],
-                                  allowlist=[{"method": "CALL", "resource": "test.test1"}]) as c:
-        yield c
-
-
-def test_job_download_logs(c):
-    with mock("test.test1", """    
-        from middlewared.service import job
-
-        @job(logs=True)
-        def mock(self, job, *args):
-            job.logs_fd.write(b'Job logs')
-    """):
-        jid = c.call("test.test1")
-
-        c.call("core.job_wait", jid, job=True)
-
-        path = c.call("core.job_download_logs", jid, "logs.txt")
-
-        r = requests.get(f"{url()}{path}")
-        r.raise_for_status()
-
-        assert r.headers["Content-Disposition"] == "attachment; filename=\"logs.txt\""
-        assert r.headers["Content-Type"] == "application/octet-stream"
-        assert r.text == "Job logs"
-
-
-def test_job_download_logs_unprivileged_downloads_internal_logs(c):
-    with mock("test.test1", """
-        def mock(self, *args):
-            job = self.middleware.call_sync("test.test2")
-            job.wait_sync(raise_error=True)
-            return job.id
-    """):
-        with mock("test.test2", """
-            from middlewared.service import job
-
-            @job(logs=True)
-            def mock(self, job, *args):
-                job.logs_fd.write(b'Job logs')
-        """):
-            jid = call("test.test1")
-
-            with pytest.raises(CallError) as ve:
-                c.call("core.job_download_logs", jid, "logs.txt")
-
-            assert ve.value.errno == errno.EPERM
-
-
-def test_job_download_logs_unprivileged_downloads_internal_logs_with_read_role(c):
-    with mock("test.test1", """
-        from middlewared.service import job
-
-        @job(logs=True, read_roles=["REPLICATION_TASK_READ"])
-        def mock(self, job, *args):
-            job.logs_fd.write(b'Job logs')
-    """):
-        jid = call("test.test1")
-
-        c.call("core.job_wait", jid, job=True)
-
-        path = c.call("core.job_download_logs", jid, "logs.txt")
-
-        r = requests.get(f"{url()}{path}")
-        r.raise_for_status()
-
-        assert r.headers["Content-Disposition"] == "attachment; filename=\"logs.txt\""
-        assert r.headers["Content-Type"] == "application/octet-stream"
-        assert r.text == "Job logs"
diff --git a/tests/api2/test_job_result.py b/tests/api2/test_job_result.py
deleted file mode 100644
index c596a8dd6926b..0000000000000
--- a/tests/api2/test_job_result.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from middlewared.test.integration.utils import call, mock
-
-
-def test_job_result():
-
-    with mock("test.test1", """
-        from middlewared.service import job
-        from middlewared.schema import returns, Password
-
-        @job()
-        @returns(Password("my_password"))
-        def mock(self, job, *args):
-            return "canary"
-    """):
-        job_id = call("test.test1")
-
-        result = call("core.job_wait", job_id, job=True)
-        # Waiting for result should give unredacted version
-        assert result == "canary"
-
-        # Querying by default should redact
-        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-        assert job["result"] != "canary"
-
-        # but we should also be able to get unredacted result if needed
-        job = call("core.get_jobs", [["id", "=", job_id]], {"get": True, "extra": {"raw_result": True}})
-        assert job["result"] == "canary"
diff --git a/tests/api2/test_keychain_ssh.py b/tests/api2/test_keychain_ssh.py
deleted file mode 100644
index a7899a17b49d9..0000000000000
--- a/tests/api2/test_keychain_ssh.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def credential():
-    credential = call("keychaincredential.create", {
-        "name": "key",
-        "type": "SSH_KEY_PAIR",
-        "attributes": call("keychaincredential.generate_ssh_key_pair"),
-    })
-    try:
-        yield credential
-    finally:
-        call("keychaincredential.delete", credential["id"])
-
-
-def test_remote_ssh_semiautomatic_setup_invalid_homedir(credential):
-    with user({
-        "username": "admin",
-        "full_name": "admin",
-        "group_create": True,
-        "home_create": False,
-        "password": "test1234",
-    }):
-        token = call("auth.generate_token", 600, {}, False)
-        with pytest.raises(CallError) as ve:
-            call("keychaincredential.remote_ssh_semiautomatic_setup", {
-                "name": "localhost",
-                "url": "http://localhost",
-                "token": token,
-                "username": "admin",
-                "private_key": credential["id"],
-            })
-
-        assert "make sure that home directory for admin user on the remote system exists" in ve.value.errmsg
-
-
-def test_remote_ssh_semiautomatic_setup_sets_user_attributes(credential):
-    with dataset("unpriv_homedir") as homedir:
-        with user({
-            "username": "unpriv",
-            "full_name": "unpriv",
-            "group_create": True,
-            "home": f"/mnt/{homedir}",
-            "password_disabled": True,
-            "smb": False,
-            "shell": "/usr/sbin/nologin",
-        }):
-            token = call("auth.generate_token", 600, {}, False)
-            connection = call("keychaincredential.remote_ssh_semiautomatic_setup", {
-                "name": "localhost",
-                "url": "http://localhost",
-                "token": token,
-                "username": "unpriv",
-                "private_key": credential["id"],
-            })
-            try:
-                call("replication.list_datasets", "SSH", connection["id"])
-            finally:
-                call("keychaincredential.delete", connection["id"])
-
-
-def test_ssl_certificate_error(credential):
-    token = call("auth.generate_token", 600, {}, False)
-    with pytest.raises(CallError) as ve:
-        call("keychaincredential.remote_ssh_semiautomatic_setup", {
-            "name": "localhost",
-            # Should fail on default self-signed certificate
-            "url": "https://localhost",
-            "token": token,
-            "private_key": credential["id"],
-        })
-
-    assert ve.value.errno == CallError.ESSLCERTVERIFICATIONERROR
-
-
-def test_ignore_ssl_certificate_error(credential):
-    token = call("auth.generate_token", 600, {}, False)
-    connection = call("keychaincredential.remote_ssh_semiautomatic_setup", {
-        "name": "localhost",
-        "url": "https://localhost",
-        "verify_ssl": False,
-        "token": token,
-        "private_key": credential["id"],
-    })
-    call("keychaincredential.delete", connection["id"])
diff --git a/tests/api2/test_large_message.py b/tests/api2/test_large_message.py
deleted file mode 100644
index e97efb4432485..0000000000000
--- a/tests/api2/test_large_message.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils.client import client
-from truenas_api_client import ClientException
-
-
-MSG_TOO_BIG_ERR = 'Max message length is 64 kB'
-
-
-def test_large_message_default():
-    LARGE_PAYLOAD_1 = 'x' * 65537
-
-    with pytest.raises(ClientException) as ce:
-        with client() as c:
-            c.call('filesystem.mkdir', LARGE_PAYLOAD_1)
-
-    assert MSG_TOO_BIG_ERR in ce.value.error
-
-
-def test_large_message_extended():
-    LARGE_PAYLOAD_1 = 'x' * 65537
-    LARGE_PAYLOAD_2 = 'x' * 2097153
-
-    # NOTE: we intentionally pass an invalid payload here to avoid writing
-    # an unnecessary file to the VM filesystem. If the call fails with
-    # ValidationErrors instead of a ClientException, we know it passed
-    # through the size check.
-    with pytest.raises(ValidationErrors):
-        with client() as c:
-            c.call('filesystem.file_receive', LARGE_PAYLOAD_1)
-
-    with pytest.raises(ClientException) as ce:
-        with client() as c:
-            c.call('filesystem.file_receive', LARGE_PAYLOAD_2)
-
-    assert MSG_TOO_BIG_ERR in ce.value.error
-
-
-def test_large_message_unauthenticated():
-    LARGE_PAYLOAD = 'x' * 10000
-
-    with pytest.raises(ClientException) as ce:
-        with client(auth=None) as c:
-            c.call('filesystem.file_receive', LARGE_PAYLOAD)
-
-    assert 'Anonymous connection max message length' in ce.value.error
diff --git a/tests/api2/test_legacy_websocket.py b/tests/api2/test_legacy_websocket.py
deleted file mode 100644
index 4a217021131e9..0000000000000
--- a/tests/api2/test_legacy_websocket.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pytest
-
-from truenas_api_client import Client
-
-from middlewared.test.integration.assets.cloud_sync import credential
-from middlewared.test.integration.utils import password, websocket_url
-
-
-@pytest.fixture(scope="module")
-def c():
-    with Client(websocket_url() + "/websocket") as c:
-        c.call("auth.login_ex", {
-            "mechanism": "PASSWORD_PLAIN",
-            "username": "root",
-            "password": password(),
-        })
-        yield c
-
-
-def test_adapts_cloud_credentials(c):
-    with credential({
-        "provider": {
-            "type": "FTP",
-            "host": "localhost",
-            "port": 21,
-            "user": "test",
-            "pass": "",
-        },
-    }) as cred:
-        result = c.call("cloudsync.credentials.get_instance", cred["id"])
-        assert result["provider"] == "FTP"
diff --git a/tests/api2/test_listdir_request_mask.py b/tests/api2/test_listdir_request_mask.py
deleted file mode 100644
index 7694fc497c6b4..0000000000000
--- a/tests/api2/test_listdir_request_mask.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import enum
-import pytest
-
-from middlewared.test.integration.utils import call
-
-
-class DirectoryRequestMask(enum.IntFlag):
-    ACL = enum.auto()
-    CTLDIR = enum.auto()
-    REALPATH = enum.auto()
-    XATTRS = enum.auto()
-    ZFS_ATTRS = enum.auto()
-
-
-@pytest.mark.parametrize('select_key,request_mask', [
-    ('realpath', DirectoryRequestMask.REALPATH.value),
-    ('acl', DirectoryRequestMask.ACL.value),
-    ('zfs_attrs', DirectoryRequestMask.ZFS_ATTRS.value),
-    ('is_ctldir', DirectoryRequestMask.CTLDIR.value),
-    ('xattrs', DirectoryRequestMask.XATTRS.value),
-    (['xattrs', 'user_xattrs'], DirectoryRequestMask.XATTRS.value),
-    ([], None),
-    ('name', 0)
-])
-def test__select_to_request_mask(select_key, request_mask):
-    if select_key == []:
-        val = call('filesystem.listdir_request_mask', [])
-        assert val is None
-    else:
-        val = call('filesystem.listdir_request_mask', [select_key])
-        assert val == request_mask
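
For reference, enum.auto() inside an IntFlag assigns successive powers of two, so the values parametrized above are ACL=1, CTLDIR=2, REALPATH=4, XATTRS=8 and ZFS_ATTRS=16, and selecting several keys simply ORs the corresponding flags. A standalone illustration (not part of the deleted test):

import enum

class DirectoryRequestMask(enum.IntFlag):
    ACL = enum.auto()        # 1
    CTLDIR = enum.auto()     # 2
    REALPATH = enum.auto()   # 4
    XATTRS = enum.auto()     # 8
    ZFS_ATTRS = enum.auto()  # 16

# Selecting both 'acl' and 'xattrs' would combine the individual flags.
mask = DirectoryRequestMask.ACL | DirectoryRequestMask.XATTRS
assert mask.value == 9
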
diff --git a/tests/api2/test_localhost_ws_auth.py b/tests/api2/test_localhost_ws_auth.py
deleted file mode 100644
index 36cb392fef9a9..0000000000000
--- a/tests/api2/test_localhost_ws_auth.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from middlewared.test.integration.utils import ssh
-
-
-def test__authentication_required_localhost():
-    cmd = 'midclt -u ws://localhost/websocket call user.query'
-    resp = ssh(cmd, check=False, complete_response=True)
-
-    assert not resp['result']
-
-    assert 'Not authenticated' in resp['stderr']
-
diff --git a/tests/api2/test_lock.py b/tests/api2/test_lock.py
deleted file mode 100644
index d5d621d786950..0000000000000
--- a/tests/api2/test_lock.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import client, mock
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_no_lock():
-    with mock("test.test1", """
-        from middlewared.service import lock
-
-        async def mock(self, *args):
-            import asyncio
-            await asyncio.sleep(5)
-    """):
-        start = time.monotonic()
-
-        with client() as c:
-            c1 = c.call("test.test1", background=True, register_call=True)
-            c2 = c.call("test.test1", background=True, register_call=True)
-            c.wait(c1, timeout=10)
-            c.wait(c2)
-
-        assert time.monotonic() - start < 6
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_async_lock():
-    with mock("test.test1", """
-        from middlewared.service import lock
-
-        @lock("test")
-        async def mock(self, *args):
-            import asyncio
-            await asyncio.sleep(5)
-    """):
-        start = time.monotonic()
-
-        with client() as c:
-            c1 = c.call("test.test1", background=True, register_call=True)
-            c2 = c.call("test.test1", background=True, register_call=True)
-            c.wait(c1)
-            c.wait(c2)
-
-        assert time.monotonic() - start >= 10
-
-
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_threading_lock():
-    with mock("test.test1", """
-        from middlewared.service import lock
-
-        @lock("test")
-        def mock(self, *args):
-            import time
-            time.sleep(5)
-    """):
-        start = time.monotonic()
-
-        with client() as c:
-            c1 = c.call("test.test1", background=True, register_call=True)
-            c2 = c.call("test.test1", background=True, register_call=True)
-            c.wait(c1)
-            c.wait(c2)
-
-        assert time.monotonic() - start >= 10
diff --git a/tests/api2/test_mail.py b/tests/api2/test_mail.py
deleted file mode 100644
index 82fde4c77fc18..0000000000000
--- a/tests/api2/test_mail.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_config_settings():
-    payload = {
-        "fromemail": "william.spam@ixsystems.com",
-        "outgoingserver": "mail.ixsystems.com",
-        "pass": "changeme",
-        "port": 25,
-        "security": "PLAIN",
-        "smtp": True,
-        "user": "william.spam@ixsystems.com"
-    }
-    call("mail.update", payload)
-    config = call("mail.config")
-    # test that payload is a subset of config
-    assert payload.items() <= config.items()
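
The closing assertion above uses the dict items-view subset idiom: items() views support set-style comparison, so payload.items() <= config.items() holds exactly when every key/value pair of the payload also appears in the config. A standalone illustration:

config = {'port': 25, 'smtp': True, 'security': 'PLAIN', 'fromname': ''}
payload = {'port': 25, 'smtp': True}

assert payload.items() <= config.items()              # payload is a subset of config
assert not ({'port': 465}.items() <= config.items())  # a differing value breaks the subset
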
diff --git a/tests/api2/test_mail_admins.py b/tests/api2/test_mail_admins.py
deleted file mode 100644
index 3d9c8edaef300..0000000000000
--- a/tests/api2/test_mail_admins.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.utils import call
-
-MAILUSER = 'wilbur'
-MAILADDR = 'wilbur.spam@ixsystems.com'
-NONMAIL_USER = 'wilburette'
-NONMAIL_ADDR = 'wilburette.spam@ixsystems.com'
-PASSWD = 'abcd1234'
-
-
-@pytest.fixture(scope='module')
-def full_admin_user():
-    ba_id = call('group.query', [['gid', '=', 544]], {'get': True})['id']
-    with user({
-        'username': NONMAIL_USER,
-        'full_name': NONMAIL_USER,
-        'group_create': True,
-        'email': NONMAIL_ADDR,
-        'password': PASSWD
-    }, get_instance=False):
-        with user({
-            'username': MAILUSER,
-            'full_name': MAILUSER,
-            'group_create': False,
-            'email': MAILADDR,
-            'group': ba_id,
-            'password': PASSWD
-        }, get_instance=True) as u:
-            yield u
-
-
-def test_mail_administrators(full_admin_user):
-    emails = call('mail.local_administrators_emails')
-    assert MAILADDR in emails
-    assert NONMAIL_ADDR not in emails
diff --git a/tests/api2/test_mock_remote.py b/tests/api2/test_mock_remote.py
deleted file mode 100644
index 50cf24e0c31fe..0000000000000
--- a/tests/api2/test_mock_remote.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import pytest
-from auto_config import ha
-from middlewared.test.integration.utils import call, mock
-
-pytestmark = pytest.mark.skipif(not ha, reason='Tests applicable to HA only')
-
-VALID_NODES = ['A', 'B']
-
-
-def test__mock_remote_node():
-    """
-    Test that we can mock on the remote node, using direct calls to verify.
-    """
-    this_node = call('failover.node')
-    assert this_node in VALID_NODES
-    other_node = call('failover.call_remote', 'failover.node')
-    assert other_node in VALID_NODES
-    assert this_node != other_node
-    with mock('failover.node', return_value='BOGUS1'):
-        assert call('failover.node') == 'BOGUS1'
-        assert call('failover.call_remote', 'failover.node') == other_node
-        with mock('failover.node', return_value='BOGUS2', remote=True):
-            assert call('failover.node') == 'BOGUS1'
-            assert call('failover.call_remote', 'failover.node') == 'BOGUS2'
-        assert call('failover.node') == 'BOGUS1'
-        assert call('failover.call_remote', 'failover.node') == other_node
-    assert call('failover.node') == this_node
-    assert call('failover.call_remote', 'failover.node') == other_node
-
-
-def test__mock_remote_indirect():
-    """
-    Test that we can mock on the remote node, using indirect calls to verify.
-    """
-    mmd = call('failover.mismatch_disks')
-    assert mmd['missing_local'] == []
-    assert mmd['missing_remote'] == []
-    disks = call('failover.get_disks_local')
-    with mock('failover.get_disks_local', return_value=disks[1:], remote=True):
-        mmd = call('failover.mismatch_disks')
-        assert mmd['missing_local'] == []
-        assert mmd['missing_remote'] == [disks[0]]
-    mmd = call('failover.mismatch_disks')
-    assert mmd['missing_local'] == []
-    assert mmd['missing_remote'] == []
diff --git a/tests/api2/test_network_configuration.py b/tests/api2/test_network_configuration.py
deleted file mode 100644
index 02685c2e38791..0000000000000
--- a/tests/api2/test_network_configuration.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-
-from auto_config import ha
-
-NEW_HOSTNAME = 'dummy123'
-
-
-def fetch_hostname():
-    name = ssh('hostname').strip()
-    if ha:
-        return name.removesuffix('-nodea').removesuffix('-nodeb')
-    return name
-
-
-def config_read_hostname():
-    config = call('network.configuration.config')
-    if ha:
-        return config['hostname_virtual']
-    else:
-        return config['hostname']
-
-
-def config_set_hostname(name):
-    if ha:
-        payload = {'hostname': f'{name}-nodea',
-                   'hostname_b': f'{name}-nodeb',
-                   'hostname_virtual': name}
-    else:
-        payload = {'hostname': name}
-    call('network.configuration.update', payload)
-
-
-def test_changing_hostname():
-    current_hostname = config_read_hostname()
-
-    config_set_hostname(NEW_HOSTNAME)
-    try:
-        assert fetch_hostname() == NEW_HOSTNAME
-    finally:
-        config_set_hostname(current_hostname)
-        assert fetch_hostname() == current_hostname
diff --git a/tests/api2/test_nfs_share_crud_roles.py b/tests/api2/test_nfs_share_crud_roles.py
deleted file mode 100644
index 2606c049aad5d..0000000000000
--- a/tests/api2/test_nfs_share_crud_roles.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.nfs.query", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "nfs.client_count", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_NFS_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, False)
-    common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, False)
-    common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, False)
-    common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, False)
-    common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_NFS_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.nfs.create", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.nfs.update", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.nfs.delete", role, True)
-    common_checks(unprivileged_user_fixture, "nfs.get_nfs3_clients", role, True, valid_role_exception=False)
-    common_checks(unprivileged_user_fixture, "nfs.get_nfs4_clients", role, True, valid_role_exception=False)
-    common_checks(
-        unprivileged_user_fixture, "service.start", role, True, method_args=["nfs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.restart", role, True, method_args=["nfs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.reload", role, True, method_args=["nfs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.stop", role, True, method_args=["nfs"], valid_role_exception=False
-    )
diff --git a/tests/api2/test_nfsv4_top_level_dataset.py b/tests/api2/test_nfsv4_top_level_dataset.py
deleted file mode 100644
index 5bc500872ed53..0000000000000
--- a/tests/api2/test_nfsv4_top_level_dataset.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, pool
-
-
-@pytest.fixture(scope='module')
-def set_nfsv4_top_level():
-    call('pool.dataset.update', pool, {'acltype': 'NFSV4', 'aclmode': 'PASSTHROUGH'})
-
-    try:
-        yield
-    finally:
-        call('pool.dataset.update', pool, {'acltype': 'POSIX', 'aclmode': 'DISCARD'})
-
-
-def test__acltype_inherit(set_nfsv4_top_level):
-    with dataset('v4inherit') as ds:
-        entry = call('pool.dataset.query', [['name', '=', ds]], {'get': True})
-
-        assert entry['acltype']['value'] == 'NFSV4'
-        assert entry['aclmode']['value'] == 'PASSTHROUGH'
diff --git a/tests/api2/test_ntpserver_alert.py b/tests/api2/test_ntpserver_alert.py
deleted file mode 100644
index 2468f2fbea3a5..0000000000000
--- a/tests/api2/test_ntpserver_alert.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import contextlib
-import copy
-import time
-
-from middlewared.test.integration.utils import call, mock, ssh
-
-CONFIG_FILE = "/etc/chrony/chrony.conf"
-BAD_NTP = "172.16.0.0"
-
-
-@contextlib.contextmanager
-def temp_remove_ntp_config():
-    orig = call("system.ntpserver.query")
-    try:
-        for i in orig:
-            _id = i.pop("id")
-            assert call("system.ntpserver.delete", _id)
-        yield copy.deepcopy(orig[0])  # arbitrarily yield first entry
-    finally:
-        for i in orig:
-            # finally update with original (functional) config
-            assert call("system.ntpserver.create", i)
-
-
-def test_verify_ntp_alert_is_raised():
-    with temp_remove_ntp_config() as temp:
-        temp["address"] = BAD_NTP
-        temp["force"] = True
-        temp_id = call("system.ntpserver.create", temp)["id"]
-        call("system.ntpserver.query", [["address", "=", BAD_NTP]], {"get": True})
-
-        # verify the OS config
-        results = ssh(f'fgrep "{BAD_NTP}" {CONFIG_FILE}', complete_response=True)
-        assert results["result"] is True, results
-
-        # verify alert is raised
-        with mock("system.time_info", return_value={"uptime_seconds": 600}):
-            assert call("alert.run_source", "NTPHealthCheck")[0]["args"]["reason"].startswith("No Active NTP peers")
-
-        # remove our bogus entry
-        assert call("system.ntpserver.delete", temp_id)
-
-
-def test_verify_ntp_alert_is_cleared():
-    max_retries = 10
-    for i in range(max_retries):
-        alerts = call("alert.run_source", "NTPHealthCheck")
-        if not alerts:
-            return
-        else:
-            time.sleep(1)
-
-    assert False, f"NTPHealthCheck alert didnt clear after {max_retries} seconds: {alerts}"
diff --git a/tests/api2/test_openssl.py b/tests/api2/test_openssl.py
deleted file mode 100644
index 0643ba120b7b0..0000000000000
--- a/tests/api2/test_openssl.py
+++ /dev/null
@@ -1,28 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, ssh
-from auto_config import ha
-
-retry = 5
-fips_version = "3.0.9"
-
-
-# Sometimes this test fails because the testing environment intermittently breaks failover
-# (a transient error that reports a failed heartbeat).
-@pytest.mark.flaky(reruns=retry, reruns_delay=5)
-@pytest.mark.skipif(not ha, reason='Test only valid for HA')
-def test_fips_version():
-    # We chain these commands into a single payload because of FIPS technicalities: while FIPS is
-    # enabled we cannot use SSH, because the SSH key used by root does not use a FIPS-approved
-    # algorithm (this may warrant further investigation). So we write the FIPS information to a
-    # file and then disable FIPS again to restore SSH, all in one combined command.
-    payload = """midclt call --job system.security.update '{"enable_fips": true}' && openssl list -providers > /root/osslproviders && midclt call system.reboot.info >> /root/osslproviders && midclt call --job system.security.update '{"enable_fips": false}'"""
-
-    ssh(payload, complete_response=True, timeout=300)
-
-    # Check that things are what we expect when FIPS was enabled
-    enabled_info = ssh("cat /root/osslproviders")
-    assert fips_version in enabled_info
-    assert "FIPS configuration was changed." in enabled_info
-
-    # Check that we no longer have FIPS enabled
-    assert fips_version not in ssh("openssl list -providers")
-    assert call("system.reboot.info")["reboot_required_reasons"] == []
diff --git a/tests/api2/test_password_reset.py b/tests/api2/test_password_reset.py
deleted file mode 100644
index fc99c9002005f..0000000000000
--- a/tests/api2/test_password_reset.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env python3
-import errno
-import pytest
-import secrets
-import string
-
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, client
-from middlewared.test.integration.utils.audit import expect_audit_method_calls
-
-
-TEST_USERNAME = 'testpasswduser'
-TEST_USERNAME_2 = 'testpasswduser2'
-TEST_GROUPNAME = 'testpasswdgroup'
-TEST_PASSWORD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-TEST_PASSWORD_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-TEST_PASSWORD2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-TEST_PASSWORD2_2 = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-REDACTED = '********'
-
-
-def test_restricted_user_set_password():
-    with unprivileged_user(
-        username=TEST_USERNAME,
-        group_name=TEST_GROUPNAME,
-        privilege_name='TEST_PASSWD_RESET_PRIVILEGE',
-        allowlist=[],
-        web_shell=False,
-        roles=['READONLY_ADMIN']
-    ) as acct:
-        with client(auth=(acct.username, acct.password)) as c:
-            payload = {
-                'username': acct.username,
-                'old_password': acct.password,
-                'new_password': TEST_PASSWORD
-            }
-
-            # Password reset using existing password and current user should work
-            with expect_audit_method_calls([{
-                'method': 'user.set_password',
-                'params': [{
-                    'username': acct.username,
-                    'old_password': REDACTED,
-                    'new_password': REDACTED
-                }],
-                'description': f'Set account password {acct.username}',
-            }]):
-                c.call('user.set_password', payload)
-
-            # Should be able to create new client session with new password
-            with client(auth=(acct.username, TEST_PASSWORD)) as c2:
-                c2.call('auth.me')
-
-        # FULL_ADMIN privileges should also allow password reset:
-        call('user.set_password', {
-            'username': acct.username,
-            'old_password': TEST_PASSWORD,
-            'new_password': TEST_PASSWORD_2
-        })
-
-        # FULL_ADMIN should also be able to skip password checks
-        call('user.set_password', {
-            'username': acct.username,
-            'new_password': TEST_PASSWORD_2,
-        })
-
-        group_id = call('group.query', [['group', '=', TEST_GROUPNAME]], {'get': True})['id']
-
-        # Create additional user with READONLY privilege
-        with user({
-           'username': TEST_USERNAME_2,
-           'full_name': TEST_USERNAME_2,
-           'group_create': True,
-           'groups': [group_id],
-           'smb': False,
-           'password': TEST_PASSWORD2
-        }) as u:
-            with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2:
-                # Limited users should not be able to change
-                # the passwords of other users
-                with pytest.raises(CallError) as ve:
-                    c2.call('user.set_password', {
-                        'username': acct.username,
-                        'old_password': TEST_PASSWORD_2,
-                        'new_password': 'CANARY'
-                    })
-
-                assert ve.value.errno == errno.EPERM
-
-                with pytest.raises(ValidationErrors) as ve:
-                    # Limited users should not be able to skip password checks
-                    c2.call('user.set_password', {
-                        'username': TEST_USERNAME_2,
-                        'new_password': 'CANARY',
-                    })
-
-                with pytest.raises(ValidationErrors) as ve:
-                    # Providing invalid old password for a limited user
-                    # should raise an error
-                    c2.call('user.set_password', {
-                        'username': TEST_USERNAME_2,
-                        'old_password': 'ANOTHER CANARY',
-                        'new_password': 'CANARY',
-                    })
-
-            call("user.update", u['id'], {'password_disabled': True})
-            with pytest.raises(ValidationErrors) as ve:
-                # This should fail because we've disabled password auth
-                call('user.set_password', {
-                    'username': TEST_USERNAME_2,
-                    'old_password': TEST_PASSWORD2,
-                    'new_password': 'CANARY'
-                })
-
-            call("user.update", u['id'], {
-                'password_disabled': False,
-                'locked': True
-            })
-
-            with pytest.raises(ValidationErrors) as ve:
-                # This should fail because we've locked account
-                call('user.set_password', {
-                    'username': TEST_USERNAME_2,
-                    'old_password': TEST_PASSWORD2,
-                    'new_password': 'CANARY'
-                })
-
-            call("user.update", u['id'], {
-                'password_disabled': False,
-                'locked': False
-            })
-
-            # Unlocking user should allow password reset to succeed
-            with client(auth=(TEST_USERNAME_2, TEST_PASSWORD2)) as c2:
-                c2.call('user.set_password', {
-                    'username': TEST_USERNAME_2,
-                    'old_password': TEST_PASSWORD2,
-                    'new_password': TEST_PASSWORD2_2
-                })
diff --git a/tests/api2/test_pool_attach.py b/tests/api2/test_pool_attach.py
deleted file mode 100644
index cd0b039f82574..0000000000000
--- a/tests/api2/test_pool_attach.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_attach_raidz1_vdev():
-    with another_pool(topology=(6, lambda disks: {
-        "data": [
-            {
-                "type": "RAIDZ1",
-                "disks": disks[0:3]
-            },
-            {
-                "type": "RAIDZ1",
-                "disks": disks[3:6]
-            },
-        ],
-    })) as pool:
-        disk = call("disk.get_unused")[0]["name"]
-
-        call("pool.attach", pool["id"], {
-            "target_vdev": pool["topology"]["data"][0]["guid"],
-            "new_disk": disk,
-        }, job=True)
-
-        pool = call("pool.get_instance", pool["id"])
-        assert pool["expand"]["state"] == "FINISHED"
diff --git a/tests/api2/test_pool_dataset_acl.py b/tests/api2/test_pool_dataset_acl.py
deleted file mode 100644
index 948cef934d6b5..0000000000000
--- a/tests/api2/test_pool_dataset_acl.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import dataclasses
-import errno
-
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from truenas_api_client import ClientException
-
-
-@dataclasses.dataclass
-class AclIds:
-    user_to_add: int = 8765309
-    user2_to_add: int = 8765310
-    group_to_add: int = 1138
-
-
-def check_for_entry(acl, id_type, xid, perms, is_posix=False):
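-    # A POSIX1E ACL must contain the entry twice (once as an access entry and once as a
-    # default entry); an NFSv4 ACL only needs a single matching entry.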
-    has_entry = has_default = has_access = False
-    for ace in acl:
-        if ace['id'] == xid and ace['tag'] == id_type and ace['perms'] == perms:
-            if is_posix:
-                if ace['default']:
-                    assert has_default is False
-                    has_default = True
-                else:
-                    assert has_access is False
-                    has_access = True
-
-            else:
-                assert has_entry is False
-                has_entry = True
-
-    return has_entry or (has_access and has_default)
-
-
-def test_simplified_apps_api_posix_acl():
-    posix_acl = [
-       {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
-       {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
-       {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'FULL_CONTROL'},
-    ]
-    with dataset('APPS_POSIX') as ds:
-        ds_path = f'/mnt/{ds}'
-        call('filesystem.add_to_acl', {'path': ds_path, 'entries': posix_acl}, job=True)
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(
-            acl,
-            'USER',
-            AclIds.user_to_add,
-            {'READ': True, 'WRITE': True, 'EXECUTE': True}, True
-        ), acl
-        assert check_for_entry(
-            acl,
-            'GROUP',
-            AclIds.group_to_add,
-            {'READ': True, 'WRITE': False, 'EXECUTE': True}, True
-        ), acl
-
-
-def test_simplified_apps_api_nfs4_acl(request):
-    nfs4_acl = [
-       {'id_type': 'USER', 'id': AclIds.user_to_add, 'access': 'MODIFY'},
-       {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'READ'},
-       {'id_type': 'USER', 'id': AclIds.user2_to_add, 'access': 'FULL_CONTROL'},
-    ]
-    with dataset('APPS_NFS4', {'share_type': 'APPS'}) as ds:
-        ds_path = f'/mnt/{ds}'
-        call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
-        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
-        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl
-
-        # check behavior of using force option.
-        # presence of file in path should trigger failure if force is not set
-        results = ssh(f'touch {ds_path}/canary', complete_response=True)
-        assert results['result'] is True, results
-
-        acl_changed = call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl}, job=True)
-
-        assert acl_changed is False
-
-        with pytest.raises(ClientException):
-            call('filesystem.add_to_acl', {'path': ds_path, 'entries': nfs4_acl + [
-                {'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'},
-            ]}, job=True)
-
-        # check behavior of using force option.
-        # second call with `force` specified should succeed
-        acl_changed = call('filesystem.add_to_acl', {
-            'path': ds_path,
-            'entries': nfs4_acl + [{'id_type': 'GROUP', 'id': AclIds.group_to_add, 'access': 'MODIFY'}],
-            'options': {'force': True}
-        }, job=True)
-
-        assert acl_changed is True
-
-        # we already added the entry earlier.
-        # this check makes sure we're not adding duplicate entries.
-        acl = call('filesystem.getacl', ds_path)['acl']
-        assert check_for_entry(acl, 'USER', AclIds.user_to_add, {'BASIC': 'MODIFY'}), acl
-        assert check_for_entry(acl, 'GROUP', AclIds.group_to_add, {'BASIC': 'READ'}), acl
-        assert check_for_entry(acl, 'USER', AclIds.user2_to_add, {'BASIC': 'FULL_CONTROL'}), acl
diff --git a/tests/api2/test_pool_dataset_create.py b/tests/api2/test_pool_dataset_create.py
deleted file mode 100644
index c7a729d12bb38..0000000000000
--- a/tests/api2/test_pool_dataset_create.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.parametrize("child", ["a/b", "a/b/c"])
-def test_pool_dataset_create_ancestors(child):
-    with dataset("ancestors_create_test") as test_ds:
-        name = f"{test_ds}/{child}"
-        call("pool.dataset.create", {"name": name, "create_ancestors": True})
-        call("pool.dataset.get_instance", name)
diff --git a/tests/api2/test_pool_dataset_details.py b/tests/api2/test_pool_dataset_details.py
deleted file mode 100644
index b38d19e97796c..0000000000000
--- a/tests/api2/test_pool_dataset_details.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.cloud_sync import local_ftp_task
-from middlewared.test.integration.assets.pool import dataset, pool
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.fixture(scope="module")
-def cloud_sync_fixture():
-    with dataset("test_pool_dataset_details") as test_ds:
-        with dataset("test_pool_dataset_details_other") as other_ds:
-            with local_ftp_task({
-                "path": f"/mnt/{pool}",
-            }) as task:
-                ssh(f"mkdir -p /mnt/{test_ds}/subdir")
-                ssh(f"mkdir -p /mnt/{other_ds}/subdir")
-                yield test_ds, other_ds, task
-
-
-@pytest.mark.parametrize("path,count", [
-    # A task that backs up the parent dataset backs up the child dataset too
-    (lambda test_ds, other_ds: f"/mnt/{pool}", 1),
-    # A task that backs up the dataset itself
-    (lambda test_ds, other_ds: f"/mnt/{test_ds}", 1),
-    # A task that backs up only part of the dataset should not count
-    (lambda test_ds, other_ds: f"/mnt/{test_ds}/subdir", 0),
-    # Unrelated datasets should not count either
-    (lambda test_ds, other_ds: f"/mnt/{other_ds}", 0),
-    (lambda test_ds, other_ds: f"/mnt/{other_ds}/subdir", 0),
-])
-def test_cloud_sync(cloud_sync_fixture, path, count):
-    test_ds, other_ds, task = cloud_sync_fixture
-    call("cloudsync.update", task["id"], {"path": path(test_ds, other_ds)})
-
-    result = call("pool.dataset.details")
-    details = [
-        ds
-        for ds in result
-        if ds["name"] == test_ds
-    ][0]
-    assert details["cloudsync_tasks_count"] == count
diff --git a/tests/api2/test_pool_dataset_encrypted.py b/tests/api2/test_pool_dataset_encrypted.py
deleted file mode 100644
index d80069bfaabdf..0000000000000
--- a/tests/api2/test_pool_dataset_encrypted.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.service_exception import CallError, ValidationErrors
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-PASSPHRASE = "12345678"
-
-
-def encryption_props():
-    return {
-        "encryption_options": {"generate_key": False, "passphrase": PASSPHRASE},
-        "encryption": True,
-        "inherit_encryption": False
-    }
-
-
-def test_delete_locked_dataset():
-    with dataset("test_delete_locked_dataset", encryption_props()) as ds:
-        call("pool.dataset.lock", ds, job=True)
-
-    with pytest.raises(CallError) as ve:
-        call("filesystem.stat", f"/mnt/{ds}")
-
-    assert ve.value.errno == errno.ENOENT
-
-
-def test_unencrypted_dataset_within_encrypted_dataset():
-    with dataset("test_pool_dataset_witin_encryted", encryption_props()) as ds:
-        with pytest.raises(ValidationErrors) as ve:
-            call("pool.dataset.create", {
-                "name": f"{ds}/child",
-                "encryption": False,
-                "inherit_encryption": False,
-            })
-
-        assert any(
-            "Cannot create an unencrypted dataset within an encrypted dataset" in error.errmsg
-            for error in ve.value.errors
-        ) is True, ve
diff --git a/tests/api2/test_pool_dataset_encryption.py b/tests/api2/test_pool_dataset_encryption.py
deleted file mode 100644
index 8fa069d6bae10..0000000000000
--- a/tests/api2/test_pool_dataset_encryption.py
+++ /dev/null
@@ -1,406 +0,0 @@
-import contextlib
-import secrets
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-from truenas_api_client.exc import ClientException
-
-
-# generated 32-byte (64 hex character) keys for pool and dataset encryption
-pool_token_hex = secrets.token_hex(32)
-dataset_token_hex = secrets.token_hex(32)
-
-encrypted_pool_name = 'test_encrypted'
-dataset = f'{encrypted_pool_name}/encrypted'
-child_dataset = f'{dataset}/child'
-passphrase = 'my_passphrase'
-
-
-def check_log_for(*phrases, should_find=False):
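-    # With the default should_find=False this asserts that none of the given phrases
-    # (passphrases, keys) have leaked into the middleware log.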
-    search_string = '|'.join(phrases)
-    cmd = f'grep -R -E "{search_string}" /var/log/middlewared.log'
-    results = ssh(cmd, check=False, complete_response=True)
-    assert results['result'] is should_find, str(results['output'])
-
-
-def verify_lock_status(ds, *, locked):
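-    # encryption_summary reports, per dataset, whether it is locked and whether the known
-    # key/passphrase would unlock it; both must agree with the expected locked state.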
-    job_status_result = call('pool.dataset.encryption_summary', ds, job=True)
-    for dictionary in job_status_result:
-        if dictionary['name'] == ds:
-            assert dictionary['unlock_successful'] is not locked, str(job_status_result)
-            assert dictionary['locked'] is locked, str(job_status_result)
-            break
-    else:
-        pytest.fail(str(job_status_result))
-
-
-@contextlib.contextmanager
-def create_dataset(payload, **delete_args):
-    name = payload['name']
-    yield call('pool.dataset.create', payload)
-    assert call('pool.dataset.delete', name, delete_args)
-
-
-@pytest.fixture(scope='class')
-def normal_pool():
-    with another_pool({'name': encrypted_pool_name}):
-        yield
-
-
-@pytest.fixture(scope='class')
-def passphrase_pool():
-    pool_passphrase = 'my_pool_passphrase'
-    with another_pool({
-        'name': encrypted_pool_name,
-        'encryption': True,
-        'encryption_options': {
-            'algorithm': 'AES-128-CCM',
-            'passphrase': pool_passphrase,
-        },
-    }):
-        check_log_for(pool_passphrase)
-        ds = call('pool.dataset.get_instance', encrypted_pool_name)
-        assert ds['key_format']['value'] == 'PASSPHRASE', ds
-        assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds
-        yield
-
-
-@pytest.fixture(scope='class')
-def key_pool():
-    with another_pool({
-        'name': encrypted_pool_name,
-        'encryption': True,
-        'encryption_options': {
-            'algorithm': 'AES-128-CCM',
-            'key': pool_token_hex,
-        },
-    }):
-        check_log_for(pool_token_hex)
-        ds = call('pool.dataset.get_instance', encrypted_pool_name)
-        assert ds['key_format']['value'] == 'HEX', ds
-        assert ds['encryption_algorithm']['value'] == 'AES-128-CCM', ds
-        yield
-
-
-@pytest.mark.usefixtures('normal_pool')
-class TestNormalPool:
-
-    def test_passphrase_encrypted_root(self):
-        payload = {
-            'name': dataset,
-            'encryption_options': {
-                'generate_key': False,
-                'pbkdf2iters': 100000,
-                'algorithm': 'AES-128-CCM',
-                'passphrase': passphrase,
-            },
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] == 'PASSPHRASE'
-            check_log_for(passphrase)
-
-            # Add a comment
-            call('pool.dataset.update', dataset, {'comments': 'testing encrypted dataset'})
-
-            # Change to key encryption
-            call('pool.dataset.change_key', dataset, {'key': dataset_token_hex}, job=True)
-            ds = call('pool.dataset.get_instance', dataset)
-            assert ds['key_format']['value'] == 'HEX'
-
-    @pytest.mark.parametrize('payload', [
-        {'encryption': False},
-        {'inherit_encryption': True}
-    ])
-    def test_dataset_not_encrypted(self, payload: dict):
-        payload['name'] = dataset
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] is None
-
-    @pytest.mark.parametrize('payload, message', [
-        (
-            {
-                'encryption_options': {'pbkdf2iters': 0},
-                'inherit_encryption': False
-            },
-            'Should be greater than or equal to 100000'
-        ),
-        (
-            {
-                'encryption_options': {'passphrase': passphrase},
-                'inherit_encryption': True
-            },
-            'Must be disabled when encryption is enabled'
-        ),
-        (
-            {
-                'encryption_options': {
-                    'generate_key': True,
-                    'passphrase': passphrase,
-                },
-                'inherit_encryption': False
-            },
-            'Must be disabled when dataset is to be encrypted with passphrase'
-        )
-    ])
-    def test_try_to_create_invalid_encrypted_dataset(self, payload: dict, message: str):
-        payload.update({
-            'name': dataset,
-            'encryption': True,
-        })
-        with pytest.raises(ValidationErrors, match=message):
-            with create_dataset(payload): pass
-
-    def test_invalid_encrypted_dataset_does_not_leak_passphrase_into_middleware_log(self):
-        check_log_for(passphrase)
-
-    @pytest.mark.parametrize('payload', [
-        {'encryption_options': {'generate_key': True}},
-        {'encryption_options': {'key': dataset_token_hex}}
-    ])
-    def test_encrypted_root_with_key_cannot_lock(self, payload: dict):
-        payload.update({
-            'name': dataset,
-            'encryption': True,
-            'inherit_encryption': False,
-        })
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] == 'HEX'
-            check_log_for(dataset_token_hex)
-
-            with pytest.raises(ClientException, match='Only datasets which are encrypted with passphrase can be locked'):
-                call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
-
-    def test_encrypted_root_lock_unlock(self):
-        # Start with key-encrypted dataset
-        payload = {
-            'name': dataset,
-            'encryption': True,
-            'inherit_encryption': False,
-            'encryption_options': {'key': dataset_token_hex}
-        }
-        with create_dataset(payload):
-            # Change to a passphrase-encrypted dataset
-            call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True)
-            ds = call('pool.dataset.get_instance', dataset)
-            assert ds['key_format']['value'] == 'PASSPHRASE'
-            check_log_for(passphrase)
-
-            # Lock it
-            assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
-            verify_lock_status(dataset, locked=True)
-
-            # Attempt to unlock with incorrect passphrase
-            payload = {
-                'recursive': True,
-                'datasets': [{
-                    'name': dataset,
-                    'passphrase': 'bad_passphrase'
-                }]
-            }
-            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
-            assert job_status['failed'][dataset]['error'] == 'Invalid Key', job_status
-            verify_lock_status(dataset, locked=True)
-
-            # Now actually unlock it
-            payload['datasets'][0]['passphrase'] = passphrase
-            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
-            assert job_status['unlocked'] == [dataset], job_status
-            verify_lock_status(dataset, locked=False)
-
-
-@pytest.mark.usefixtures('passphrase_pool')
-class TestPassphraseEncryptedPool:
-
-    def test_passphrase_encrypted_root_cannot_change_key(self):
-        payload = {
-            'name': dataset,
-            'encryption_options': {
-                'generate_key': False,
-                'pbkdf2iters': 100000,
-                'algorithm': 'AES-128-CCM',
-                'passphrase': passphrase,
-            },
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        with create_dataset(payload):
-            check_log_for(passphrase)
-            with pytest.raises(Exception, match=f'{dataset} has parent\\(s\\) which are encrypted with a passphrase'):
-                call('pool.dataset.change_key', dataset, {'key': dataset_token_hex}, job=True)
-
-    def test_passphrase_encrypted_root_cannot_change_key_does_not_leak_passphrase_into_middleware_log(self):
-        check_log_for(passphrase)
-
-    def test_create_dataset_to_inherit_encryption_from_passphrase_encrypted_pool(self):
-        payload = {
-            'name': dataset,
-            'inherit_encryption': True
-        }
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] == 'PASSPHRASE', ds
-
-    @pytest.mark.parametrize('payload', [
-        {'encryption_options': {'generate_key': True}},
-        {'encryption_options': {'key': dataset_token_hex}},
-    ])
-    def test_try_to_create_invalid_encrypted_dataset(self, payload: dict):
-        payload.update({
-            'name': dataset,
-            'encryption': True,
-            'inherit_encryption': False
-        })
-        with pytest.raises(ValidationErrors, match='Passphrase encrypted datasets cannot have children encrypted with a key'):
-            with create_dataset(payload): pass
-
-    def test_try_to_create_invalid_encrypted_dataset_does_not_leak_encryption_key_into_middleware_log(self):
-        check_log_for(dataset_token_hex)
-
-
-@pytest.mark.usefixtures('key_pool')
-class TestKeyEncryptedPool:
-
-    def test_key_encrypted_root(self):
-        # Start with key-encrypted dataset
-        payload = {
-            'name': dataset,
-            'encryption_options': {'key': dataset_token_hex},
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] == 'HEX', ds
-            check_log_for(dataset_token_hex)
-
-            # Change to passphrase encryption
-            call('pool.dataset.change_key', dataset, {'passphrase': passphrase}, job=True)
-            check_log_for(passphrase)
-            ds = call('pool.dataset.get_instance', dataset)
-            assert ds['key_format']['value'] == 'PASSPHRASE', ds
-
-            # Lock the dataset
-            assert call('pool.dataset.lock', dataset, {'force_umount': True}, job=True)
-            ds = call('pool.dataset.get_instance', dataset)
-            assert ds['locked'] is True, ds
-            verify_lock_status(dataset, locked=True)
-
-            # Unlock the dataset
-            payload = {
-                'recursive': True,
-                'datasets': [{
-                    'name': dataset,
-                    'passphrase': passphrase
-                }]
-            }
-            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
-            assert job_status['unlocked'] == [dataset], job_status
-            check_log_for(passphrase)
-            verify_lock_status(dataset, locked=False)
-
-    def test_dataset_with_inherit_encryption(self):
-        payload = {
-            'name': dataset,
-            'inherit_encryption': True
-        }
-        with create_dataset(payload) as ds:
-            assert ds['key_format']['value'] == 'HEX', ds
-
-    def test_encrypted_dataset_with_generate_key(self):
-        payload = {
-            'name': dataset,
-            'encryption_options': {'generate_key': True},
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        with create_dataset(payload): pass
-
-    def test_passphrase_encrypted_dataset_parent_child_lock_unlock(self):
-        payload = {
-            'name': dataset,
-            'encryption_options': {'passphrase': passphrase},
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        with create_dataset(payload, recursive=True):  # Create parent dataset
-            check_log_for(passphrase)
-
-            # Create child dataset
-            child_passphrase = 'my_passphrase2'
-            payload.update({
-                'name': child_dataset,
-                'encryption_options': {'passphrase': child_passphrase},
-            })
-            call('pool.dataset.create', payload)
-            check_log_for(child_passphrase)
-
-            # Lock parent (and child)
-            assert call('pool.dataset.lock', dataset, job=True)
-            for ds_name in (dataset, child_dataset):
-                ds = call('pool.dataset.get_instance', ds_name)
-                assert ds['locked'] is True, ds
-                verify_lock_status(ds_name, locked=True)
-
-            # Try to unlock child
-            payload = {
-                'recursive': True,
-                'datasets': [{
-                    'name': child_dataset,
-                    'passphrase': child_passphrase
-                }]
-            }
-            with pytest.raises(ClientException, match=f'{child_dataset} has locked parents {dataset} which must be unlocked first'):
-                call('pool.dataset.unlock', child_dataset, payload, job=True)
-            check_log_for(child_passphrase)
-            verify_lock_status(child_dataset, locked=True)
-
-            # Unlock parent (and child)
-            payload = {
-                'recursive': True,
-                'datasets': [
-                    {
-                        'name': dataset,
-                        'passphrase': passphrase
-                    },
-                    {
-                        'name': child_dataset,
-                        'passphrase': child_passphrase
-                    }
-                ]
-            }
-            job_status = call('pool.dataset.unlock', dataset, payload, job=True)
-            assert job_status['unlocked'] == [dataset, child_dataset], job_status
-            check_log_for(passphrase, child_passphrase)
-            for ds_name in (dataset, child_dataset):
-                ds = call('pool.dataset.get_instance', ds_name)
-                assert ds['locked'] is False, ds
-                verify_lock_status(ds_name, locked=False)
-
-    def test_key_encrypted_dataset(self):
-        # Create parent dataset
-        payload = {
-            'name': dataset,
-            'encryption_options': {'key': dataset_token_hex},
-            'encryption': True,
-            'inherit_encryption': False
-        }
-        call('pool.dataset.create', payload)
-        check_log_for(dataset_token_hex)
-
-        # Create child dataset
-        payload.update({
-            'name': child_dataset,
-            'encryption_options': {'passphrase': passphrase},
-        })
-        call('pool.dataset.create', payload)
-        check_log_for(passphrase)
-        ds = call('pool.dataset.get_instance', child_dataset)
-        assert ds['key_format']['value'] == 'PASSPHRASE', ds
-
-        # Inherit key encryption from parent
-        call('pool.dataset.inherit_parent_encryption_properties', child_dataset)
-        ds = call('pool.dataset.get_instance', child_dataset)
-        assert ds['key_format']['value'] == 'HEX', ds
diff --git a/tests/api2/test_pool_dataset_info.py b/tests/api2/test_pool_dataset_info.py
deleted file mode 100644
index 8d1b7c4cd60e4..0000000000000
--- a/tests/api2/test_pool_dataset_info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import pool
-
-
-def test_recommended_zvol_blocksize():
-    assert call("pool.dataset.recommended_zvol_blocksize", pool) == "16K"
diff --git a/tests/api2/test_pool_dataset_processes.py b/tests/api2/test_pool_dataset_processes.py
deleted file mode 100644
index 4817cb92f8510..0000000000000
--- a/tests/api2/test_pool_dataset_processes.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import another_pool
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-def test_empty_for_locked_root_dataset():
-    with another_pool({"encryption": True, "encryption_options": {"passphrase": "passphrase"}}):
-        call("pool.dataset.lock", "test", job=True)
-        assert call("pool.dataset.processes", "test") == []
diff --git a/tests/api2/test_pool_dataset_quota_alert.py b/tests/api2/test_pool_dataset_quota_alert.py
deleted file mode 100644
index 8c8defb6bd988..0000000000000
--- a/tests/api2/test_pool_dataset_quota_alert.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import re
-
-import pytest
-from pytest_dependency import depends
-
-from auto_config import pool_name, user, password
-from functions import SSH_TEST
-from middlewared.test.integration.utils import call
-
-
-G = 1024 * 1024 * 1024
-
-
-@pytest.mark.parametrize("datasets,expected_alerts", [
-    (
-        {
-            "": {
-                "used": 900,
-                "quota": 1 * G,
-            }
-        },
-        [
-            {"formatted": r"Quota exceeded on dataset tank/quota_test. Used 8|9[0-9.]+% \(8|9[0-9.]+ MiB of 1 GiB\)."},
-        ]
-    ),
-    (
-        {
-            "": {
-                "used": 118,
-                "quota": 10 * G,
-                "refquota": 1 * G,
-            }
-        },
-        [
-            # There was a false positive:
-            # {"formatted": r"Quota exceeded on dataset tank/quota_test. Used 91.[0-9]+% \(9.[0-9]+ GiB of 10 GiB\)."},
-        ]
-    ),
-    (
-        {
-            "": {
-                "used": 100,
-                "quota": 1000000000 * G,
-            }
-        },
-        [
-            # There should be no quota alerts if the quota is set to a value larger than the dataset size
-        ]
-    ),
-])
-def test_dataset_quota_alert(request, datasets, expected_alerts):
-    assert "" in datasets
-
-    try:
-        for dataset, params in datasets.items():
-            used = params.pop("used", None)
-
-            call("pool.dataset.create", {"name": f"{pool_name}/quota_test/{dataset}".rstrip("/"), **params})
-
-            if used is not None:
-                results = SSH_TEST(f'dd if=/dev/urandom of=/mnt/{pool_name}/quota_test/{dataset}/blob '
-                                   f'bs=1M count={used}', user, password)
-                assert results['result'] is True, results
-
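-        # re-initialize the alert framework and process alerts right away instead of
-        # waiting for the periodic evaluation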
-        call("alert.initialize")
-        call("core.bulk", "alert.process_alerts", [[]], job=True)
-
-        alerts = [alert for alert in call("alert.list") if alert["source"] == "Quota"]
-        assert len(alerts) == len(expected_alerts), alerts
-
-        for alert, expected_alert in zip(alerts, expected_alerts):
-            for k, v in expected_alert.items():
-                if k == "formatted":
-                    assert re.match(v, alert[k]), (alert, expected_alert, k)
-                else:
-                    assert alert[k] == v, (alert, expected_alert, k)
-    finally:
-        call("pool.dataset.delete", f"{pool_name}/quota_test", {
-            "recursive": True,
-        })
diff --git a/tests/api2/test_pool_dataset_snapshot_count.py b/tests/api2/test_pool_dataset_snapshot_count.py
deleted file mode 100644
index b004d5f60d535..0000000000000
--- a/tests/api2/test_pool_dataset_snapshot_count.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import textwrap
-
-import pytest
-
-from middlewared.test.integration.utils import call, mock
-from middlewared.test.integration.assets.pool import dataset
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-def test_pool_dataset_snapshot_count():
-    with dataset("test_pool_dataset_snapshot_count") as ds:
-        for i in range(7):
-            call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
-
-        with mock("zfs.snapshot.query", textwrap.dedent("""\
-            def mock(self, *args):
-                raise Exception("Should not be called")
-        """)):
-            assert call("pool.dataset.snapshot_count", ds) == 7
diff --git a/tests/api2/test_pool_dataset_track_processes.py b/tests/api2/test_pool_dataset_track_processes.py
deleted file mode 100644
index 693896f860d9b..0000000000000
--- a/tests/api2/test_pool_dataset_track_processes.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import contextlib
-import time
-
-import pytest
-from pytest_dependency import depends
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import dataset, pool
-
-import os
-import sys
-sys.path.append(os.getcwd())
-
-
-@pytest.mark.parametrize("datasets,file_open_path,arg_path", [
-    # A file on a dataset
-    (
-        [('test', None)],
-        f'/mnt/{pool}/test/test_file',
-        lambda ssh: f'/mnt/{pool}/test',
-    ),
-    # zvol
-    (
-        [('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
-        f'/dev/zvol/{pool}/test',
-        lambda ssh: f'/dev/zvol/{pool}/test'
-    ),
-    # zvol with /dev/zd* path
-    (
-        [('test', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100})],
-        f'/dev/zvol/{pool}/test',
-        lambda ssh: ssh(f'readlink -f /dev/zvol/{pool}/test').strip(),
-    ),
-    # A dataset with nested zvol
-    (
-        [
-            ('test', None),
-            ('test/zvol', {'type': 'VOLUME', 'volsize': 1024 * 1024 * 100}),
-        ],
-        f'/dev/zvol/{pool}/test/zvol',
-        lambda ssh: f'/dev/zvol/{pool}/test',
-    ),
-])
-def test__open_path_and_check_proc(request, datasets, file_open_path, arg_path):
-    with contextlib.ExitStack() as stack:
-        for name, data in datasets:
-            stack.enter_context(dataset(name, data))
-
-        opened = False
-        try:
-            test_file = file_open_path
-            open_pid = ssh(f"""python -c 'import time; f = open("{test_file}", "w+"); time.sleep(10)' > /dev/null 2>&1 & echo $!""")
-            open_pid = open_pid.strip()
-            assert open_pid.isdigit(), f'{open_pid!r} is not a digit'
-            opened = True
-
-            # spinning up python interpreter could take some time on busy system so sleep
-            # for a couple seconds to give it time
-            time.sleep(2)
-
-            # the cmdline as it is expected to appear in the processes_using_paths output
-            cmdline = f"""python -c import time; f = open(\"{test_file}\", \"w+\"); time.sleep(10)"""
-
-            # have to use websocket since the method being called is private
-            res = call('pool.dataset.processes_using_paths', [arg_path(ssh)])
-            assert len(res) == 1
-
-            result = res[0]
-            assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
-            assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
-            assert 'paths' not in result
-
-            res = call('pool.dataset.processes_using_paths', [arg_path(ssh)], True)
-            assert len(res) == 1
-            result = res[0]
-            assert result['pid'] == open_pid, f'{result["pid"]!r} does not match {open_pid!r}'
-            assert result['cmdline'] == cmdline, f'{result["cmdline"]!r} does not match {cmdline!r}'
-            assert 'paths' in result
-            assert len(result['paths']) == 1
-            # parenthesize the conditional so we actually compare against '/dev/zd0' in the zvol case
-            assert result['paths'][0] == (test_file if test_file.startswith('/mnt') else '/dev/zd0')
-
-        finally:
-            if opened:
-                ssh(f'kill -9 {open_pid}', check=False)
diff --git a/tests/api2/test_pool_dataset_unlock.py b/tests/api2/test_pool_dataset_unlock.py
deleted file mode 100644
index 9ba1eac7ca8b1..0000000000000
--- a/tests/api2/test_pool_dataset_unlock.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import contextlib
-
-import pytest
-
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, ssh
-from protocols import SMB
-from samba import ntstatus, NTSTATUSError
-
-
-SMB_PASSWORD = 'Abcd1234'
-SMB_USER = 'smbuser999'
-
-
-def passphrase_encryption():
-    return {
-        'encryption_options': {
-            'generate_key': False,
-            'pbkdf2iters': 100000,
-            'algorithm': 'AES-128-CCM',
-            'passphrase': 'passphrase',
-        },
-        'encryption': True,
-        'inherit_encryption': False,
-    }
-
-def lock_dataset(name):
-    payload = {
-        'force_umount': True
-    }
-    assert call('pool.dataset.lock', name, payload, job=True)
-
-
-def unlock_dataset(name, options=None):
-    payload = {
-        'recursive': True,
-        'datasets': [
-            {
-                'name': name,
-                'passphrase': 'passphrase'
-            }
-        ],
-        **(options or {}),
-    }
-    result = call('pool.dataset.unlock', name, payload, job=True)
-    assert result['unlocked'] == [name], str(result)
-
-
-@contextlib.contextmanager
-def smb_connection(**kwargs):
-    c = SMB()
-    c.connect(**kwargs)
-
-    try:
-        yield c
-    finally:
-        c.disconnect()
-
-
-@pytest.fixture(scope='module')
-def smb_user():
-    with user({
-        'username': SMB_USER,
-        'full_name': 'doug',
-        'group_create': True,
-        'password': SMB_PASSWORD,
-        'smb': True
-    }, get_instance=True) as u:
-        yield u
-
-
-@pytest.mark.parametrize('toggle_attachments', [True, False])
-def test_pool_dataset_unlock_smb(smb_user, toggle_attachments):
-    with (
-        # Prepare test SMB share
-        dataset('normal', mode='777') as normal,
-        smb_share(f'/mnt/{normal}', 'normal', {'guestok': True}),
-        # Create an encrypted SMB share; unlocking it might interrupt the SMB service
-        dataset('encrypted', passphrase_encryption(), mode='777') as encrypted,
-        smb_share(f'/mnt/{encrypted}', 'encrypted', {'guestok': True})
-    ):
-        ssh(f'touch /mnt/{encrypted}/secret')
-        assert call('service.start', 'cifs')
-        lock_dataset(encrypted)
-        # Mount test SMB share
-        with smb_connection(
-            share='normal',
-            username=SMB_USER,
-            password=SMB_PASSWORD
-        ) as normal_connection:
-            # Locked share should not be mountable
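-            # (the server reports the unavailable share path as NT_STATUS_BAD_NETWORK_NAME)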
-            with pytest.raises(NTSTATUSError) as e:
-                with smb_connection(
-                    share='encrypted',
-                    username=SMB_USER,
-                    password=SMB_PASSWORD
-                ):
-                    pass
-
-            assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME
-
-            conn = normal_connection.show_connection()
-            assert conn['connected'], conn
-            unlock_dataset(encrypted, {'toggle_attachments': toggle_attachments})
-
-            conn = normal_connection.show_connection()
-            assert conn['connected'], conn
-
-        if toggle_attachments:
-            # We should be able to mount encrypted share
-            with smb_connection(
-                share='encrypted',
-                username=SMB_USER,
-                password=SMB_PASSWORD
-            ) as encrypted_connection:
-                assert [x['name'] for x in encrypted_connection.ls('')] == ['secret']
-        else:
-            # We should still not be able to mount encrypted share as we did not reload attachments
-            with pytest.raises(NTSTATUSError) as e:
-                with smb_connection(
-                    share='encrypted',
-                    username=SMB_USER,
-                    password=SMB_PASSWORD
-                ):
-                    pass
-
-            assert e.value.args[0] == ntstatus.NT_STATUS_BAD_NETWORK_NAME
-
-    assert call('service.stop', 'cifs')
diff --git a/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py b/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py
deleted file mode 100644
index 9ee3268fbb657..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_lock_immutable_flags.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-PASSPHRASE = '12345678'
-
-
-def is_immutable(path: str) -> bool:
-    attrs = call('filesystem.stat', path)['attributes']
-    return 'IMMUTABLE' in attrs
-
-
-def encryption_props():
-    return {
-        'encryption_options': {'generate_key': False, 'passphrase': PASSPHRASE},
-        'encryption': True,
-        'inherit_encryption': False
-    }
-
-
-def test_lock_sets_immutable_flag():
-    with dataset('parent', encryption_props()) as parent_ds:
-        with dataset('parent/child', encryption_props()) as child_ds:
-            child_ds_mountpoint = os.path.join('/mnt', child_ds)
-            assert is_immutable(child_ds_mountpoint) is False, child_ds_mountpoint
-            call('pool.dataset.lock', child_ds, job=True)
-            assert is_immutable(child_ds_mountpoint) is True, child_ds_mountpoint
-
-        parent_mountpoint = os.path.join('/mnt', parent_ds)
-        assert is_immutable(parent_mountpoint) is False, parent_mountpoint
-        call('pool.dataset.lock', parent_ds, job=True)
-        assert is_immutable(parent_mountpoint) is True, parent_mountpoint
-
-
-def test_unlock_unsets_immutable_flag():
-    with dataset('parent', encryption_props()) as parent_ds:
-        parent_mountpoint = os.path.join('/mnt', parent_ds)
-        with dataset('parent/child', encryption_props()) as child_ds:
-            child_ds_mountpoint = os.path.join('/mnt', child_ds)
-            call('pool.dataset.lock', parent_ds, job=True)
-            assert is_immutable(parent_mountpoint) is True, parent_mountpoint
-
-            call('pool.dataset.unlock', parent_ds, {
-                'datasets': [{'name': parent_ds, 'passphrase': PASSPHRASE}, {'name': child_ds, 'passphrase': 'random'}],
-                'recursive': True,
-            }, job=True)
-            assert is_immutable(parent_mountpoint) is False, parent_mountpoint
-            assert is_immutable(child_ds_mountpoint) is True, child_ds_mountpoint
-            call('pool.dataset.unlock', child_ds, {
-                'datasets': [{'name': child_ds, 'passphrase': PASSPHRASE}],
-            }, job=True)
-            assert is_immutable(child_ds_mountpoint) is False, child_ds_mountpoint
diff --git a/tests/api2/test_pool_dataset_unlock_recursive.py b/tests/api2/test_pool_dataset_unlock_recursive.py
deleted file mode 100644
index 66a953d7da291..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_recursive.py
+++ /dev/null
@@ -1,40 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import pool
-
-
-def test_pool_dataset_unlock_recursive():
-    key = "0" * 32
-    try:
-        ssh(f"echo -n '{key}' > /tmp/key")
-        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test")
-        ssh(f"zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///tmp/key {pool}/test/nested")
-        ssh(f"echo TEST > /mnt/{pool}/test/nested/file")
-        ssh("rm /tmp/key")
-        ssh(f"zfs set readonly=on {pool}/test")
-        ssh(f"zfs set readonly=on {pool}/test/nested")
-        ssh(f"zfs unmount {pool}/test")
-        ssh(f"zfs unload-key -r {pool}/test")
-
-        result = call("pool.dataset.unlock", f"{pool}/test", {
-            "recursive": True,
-            "datasets": [
-                {
-                    "name": f"{pool}/test",
-                    "key": key.encode("ascii").hex(),
-                    "recursive": True,
-                },
-            ],
-        }, job=True)
-        assert not result["failed"]
-
-        assert not call("pool.dataset.get_instance", f"{pool}/test")["locked"]
-        assert not call("pool.dataset.get_instance", f"{pool}/test/nested")["locked"]
-
-        # Ensure the child dataset is mounted
-        assert ssh(f"cat /mnt/{pool}/test/nested/file") == "TEST\n"
-
-        # Ensure the keys are stored in the database to be able to unlock the datasets after reboot
-        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test"]])
-        assert call("datastore.query", "storage.encrypteddataset", [["name", "=", f"{pool}/test/nested"]])
-    finally:
-        call("pool.dataset.delete", f"{pool}/test", {"recursive": True})
diff --git a/tests/api2/test_pool_dataset_unlock_restart_vms.py b/tests/api2/test_pool_dataset_unlock_restart_vms.py
deleted file mode 100644
index 85897416f8a35..0000000000000
--- a/tests/api2/test_pool_dataset_unlock_restart_vms.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, mock, ssh
-
-
-PASSPHRASE = "12345678"
-
-
-def encryption_props():
-    return {
-        "encryption_options": {"generate_key": False, "passphrase": PASSPHRASE},
-        "encryption": True,
-        "inherit_encryption": False
-    }
-
-
-@pytest.mark.parametrize("zvol", [True, False])
-def test_restart_vm_on_dataset_unlock(zvol):
-    if zvol:
-        data = {"type": "VOLUME", "volsize": 1048576}
-    else:
-        data = {}
-
-    with dataset("test", {**data, **encryption_props()}) as ds:
-        call("pool.dataset.lock", ds, job=True)
-
-        if zvol:
-            device = {"attributes": {"path": f"/dev/zvol/{ds}", "dtype": "DISK"}}
-        else:
-            device = {"attributes": {"path": f"/mnt/{ds}/child", "dtype": "RAW"}}
-
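-        # mock a running VM whose device lives on the locked dataset; the mocked
-        # vm.stop/vm.start handlers below drop sentinel files that we stat afterwards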
-        with mock("vm.query", return_value=[{"id": 1, "devices": [device]}]):
-            with mock("vm.status", return_value={"state": "RUNNING"}):
-                ssh("rm -f /tmp/test-vm-stop")
-                with mock("vm.stop", """
-                    from middlewared.service import job
-
-                    @job()
-                    def mock(self, job, *args):
-                        with open("/tmp/test-vm-stop", "w") as f:
-                            pass
-                """):
-                    ssh("rm -f /tmp/test-vm-start")
-                    with mock("vm.start", declaration="""
-                        def mock(self, job, *args):
-                            with open("/tmp/test-vm-start", "w") as f:
-                                pass
-                    """):
-                        call(
-                            "pool.dataset.unlock",
-                            ds,
-                            {"datasets": [{"name": ds, "passphrase": PASSPHRASE}]},
-                            job=True,
-                        )
-
-                        call("filesystem.stat", "/tmp/test-vm-stop")
-                        call("filesystem.stat", "/tmp/test-vm-start")
diff --git a/tests/api2/test_pool_expand.py b/tests/api2/test_pool_expand.py
deleted file mode 100644
index 05af372ccf674..0000000000000
--- a/tests/api2/test_pool_expand.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import time
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def retry_get_parts_on_disk(disk, max_tries=10):
-    for i in range(max_tries):
-        if parts := call('disk.list_partitions', disk):
-            return parts
-        time.sleep(1)
-    else:
-        assert False, f'Failed after {max_tries} seconds for partition info on {disk!r}'
-
-
-def test_expand_pool():
-    with another_pool() as pool:
-        disk = pool["topology"]["data"][0]["disk"]
-        original_partition_size = call("disk.list_partitions", disk)[-1]["size"]
-        # Ensure that the test pool vdev is way larger than 2 GiB
-        assert original_partition_size > 2147483648 * 2
-
-        # Transform this pool into a pool on a vdev with a partition that is only 2 GiB
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"sgdisk -d 1 /dev/{disk}")
-        ssh(f"sgdisk -n 1:0:+2GiB -t 1:BF01 /dev/{disk}")
-        small_partition = retry_get_parts_on_disk(disk)[-1]
-        assert small_partition["size"] < 2147483648 * 1.01
-        device = "disk/by-partuuid/" + small_partition["partition_uuid"]
-        ssh(f"zpool create {pool['name']} -o altroot=/mnt -f {device}")
-        # Ensure that the pool size is small now
-        assert call("pool.get_instance", pool["id"])["size"] < 2147483648 * 1.01
-        ssh(f"touch /mnt/{pool['name']}/test")
-        call("pool.expand", pool["id"], job=True)
-
-        new_partition = call("disk.list_partitions", disk)[-1]
-        # Ensure that the partition size is way larger than 2 GiB
-        assert new_partition["size"] > 2147483648 * 2
-        # Ensure that the pool size was increased
-        assert call("pool.get_instance", pool["id"])["size"] > 2147483648 * 2
-        # Ensure that data was not destroyed
-        assert ssh(f"ls /mnt/{pool['name']}") == "test\n"
-
-
-def test_expand_partition_keeps_initial_offset():
-    disk = call("disk.get_unused")[0]["name"]
-    call("disk.wipe", disk, "QUICK", job=True)
-    ssh(f"sgdisk -n 0:8192:1GiB /dev/{disk}")
-    partition = retry_get_parts_on_disk(disk)[0]
-    call("pool.expand_partition", partition)
-    expanded_partition = retry_get_parts_on_disk(disk)[0]
-    assert expanded_partition["size"] > partition["size"]
-    assert expanded_partition["start"] == partition["start"]
diff --git a/tests/api2/test_pool_export.py b/tests/api2/test_pool_export.py
deleted file mode 100644
index 1669702155f98..0000000000000
--- a/tests/api2/test_pool_export.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import pytest
-
-from truenas_api_client import ClientException
-
-from middlewared.test.integration.assets.pool import another_pool, pool as pool_name
-from middlewared.test.integration.utils import call, disable_failover, mock
-
-
-def test_systemdataset_migrate_error():
-    """
-    On HA systems this test would fail with the error below if failover were left enabled:
-    [ENOTSUP] Disable failover before exporting last pool on system.
-    """
-    with disable_failover():
-        pool = call("pool.query", [["name", "=", pool_name]], {"get": True})
-
-        with mock("systemdataset.update", """\
-            from middlewared.service import job, CallError
-
-            @job()
-            def mock(self, job, *args):
-                raise CallError("Test error")
-        """):
-            with pytest.raises(ClientException) as e:
-                call("pool.export", pool["id"], job=True)
-
-            assert e.value.error == (
-                "[EFAULT] This pool contains system dataset, but its reconfiguration failed: [EFAULT] Test error"
-            )
-
-
-def test_destroy_offline_disks():
-    with another_pool(topology=(2, lambda disks: {
-        "data": [
-            {"type": "MIRROR", "disks": disks[0:2]},
-        ],
-    })) as pool:
-        disk = pool["topology"]["data"][0]["children"][0]
-
-        call("pool.offline", pool["id"], {"label": disk["guid"]})
-
-        call("pool.export", pool["id"], {"destroy": True}, job=True)
-
-        unused = [unused for unused in call("disk.get_unused") if unused["name"] == disk["disk"]][0]
-
-        assert unused["exported_zpool"] is None
diff --git a/tests/api2/test_pool_is_upgraded.py b/tests/api2/test_pool_is_upgraded.py
deleted file mode 100644
index 19d9050ad2b11..0000000000000
--- a/tests/api2/test_pool_is_upgraded.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool, pool
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.fixture(scope="module")
-def outdated_pool():
-    with another_pool() as pool:
-        device = pool["topology"]["data"][0]["path"]
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"zpool create {pool['name']} -o altroot=/mnt -o feature@sha512=disabled -f {device}")
-        yield pool
-
-
-def test_is_upgraded():
-    pool_id = call("pool.query", [["name", "=", pool]])[0]["id"]
-    assert call("pool.is_upgraded", pool_id)
-
-
-def test_is_outdated(outdated_pool):
-    assert call("pool.is_upgraded", outdated_pool["id"]) is False
-
-
-def test_is_outdated_in_list(outdated_pool):
-    pool = call("pool.query", [["id", "=", outdated_pool["id"]]], {"extra": {"is_upgraded": True}})[0]
-    assert pool["is_upgraded"] is False
-
-
-# Flaky as one-shot alert creation might be delayed until `alert.process_alerts` completion.
-@pytest.mark.flaky(reruns=5, reruns_delay=5)
-def test_is_outdated_alert(outdated_pool):
-    alerts = call("alert.list")
-    assert any((i["klass"] == "PoolUpgraded" and i["args"] == outdated_pool["name"] for i in alerts))
diff --git a/tests/api2/test_pool_is_upgraded_alert_removal.py b/tests/api2/test_pool_is_upgraded_alert_removal.py
deleted file mode 100644
index 3c43395c6ef4f..0000000000000
--- a/tests/api2/test_pool_is_upgraded_alert_removal.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import contextlib
-import time
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def assert_has_outdated_pool_alert(pool_name, has):
-    for i in range(60):
-        alerts = call("alert.list")
-        if any((i["klass"] == "PoolUpgraded" and i["args"] == pool_name for i in alerts)) == has:
-            break
-
-        time.sleep(1)
-    else:
-        assert False, alerts
-
-
-@contextlib.contextmanager
-def outdated_pool():
-    with another_pool() as pool:
-        device = pool["topology"]["data"][0]["path"]
-        ssh(f"zpool export {pool['name']}")
-        ssh(f"zpool create test -o altroot=/mnt -o feature@sha512=disabled -f {device}")
-        assert_has_outdated_pool_alert(pool["name"], True)
-        yield pool
-
-
-def test_outdated_pool_alert_removed_on_pool_upgrade():
-    with outdated_pool() as pool:
-        call("pool.upgrade", pool["id"])
-
-        assert_has_outdated_pool_alert(pool["name"], False)
-
-
-def test_outdated_pool_alert_removed_on_pool_delete():
-    with outdated_pool() as pool:
-        pass
-
-    assert_has_outdated_pool_alert(pool["name"], False)
diff --git a/tests/api2/test_pool_remove_disk.py b/tests/api2/test_pool_remove_disk.py
deleted file mode 100644
index 38ca637054d9a..0000000000000
--- a/tests/api2/test_pool_remove_disk.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-def test_waits_for_device_removal():
-    with another_pool(topology=(4, lambda disks: {
-        "data": [
-            {"type": "MIRROR", "disks": disks[0:2]},
-            {"type": "MIRROR", "disks": disks[2:4]}
-        ],
-    })) as pool:
-        ssh(f"dd if=/dev/urandom of=/mnt/{pool['name']}/blob bs=1M count=1000")
-        call("pool.remove", pool["id"], {"label": pool["topology"]["data"][0]["guid"]}, job=True)
diff --git a/tests/api2/test_pool_replace_disk.py b/tests/api2/test_pool_replace_disk.py
deleted file mode 100644
index 2c63b02c75e75..0000000000000
--- a/tests/api2/test_pool_replace_disk.py
+++ /dev/null
@@ -1,68 +0,0 @@
-from time import sleep
-
-import pytest
-
-from middlewared.test.integration.assets.pool import _2_disk_mirror_topology, _4_disk_raidz2_topology, another_pool
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.parametrize("topology", [_2_disk_mirror_topology, _4_disk_raidz2_topology])
-def test_pool_replace_disk(topology):
-    """This tests the following:
-        1. create a zpool based on the `topology`
-        2. flatten the newly created zpools topology
-        3. verify the zpool vdev size matches reality
-        4. choose 1st vdev from newly created zpool
-        5. choose 1st disk in vdev from step #4
-        6. choose 1st disk in disk.get_unused as replacement disk
-        7. call pool.replace using disk from step #5 with disk from step #6
-        8. validate that the disk being replaced still has zfs partitions
-        9. validate pool.get_instance topology info shows the replacement disk
-        10. validate disk.get_instance associates the replacement disk with the zpool
-    """
-    with another_pool(topology=topology) as pool:  # step 1
-        # step 2
-        flat_top = call("pool.flatten_topology", pool["topology"])
-        pool_top = [vdev for vdev in flat_top if vdev["type"] == "DISK"]
-        # step 3
-        assert len(pool_top) == topology[0]
-
-        # step 4
-        to_replace_vdev = pool_top[0]
-        # step 5
-        to_replace_disk = call(
-            "disk.query", [["devname", "=", to_replace_vdev["disk"]]], {"get": True, "extra": {"pools": True}}
-        )
-        assert to_replace_disk["pool"] == pool["name"]
-
-        # step 6
-        new_disk = call("disk.get_unused")[0]
-
-        # step 7
-        call("pool.replace", pool["id"], {"label": to_replace_vdev["guid"], "disk": new_disk["identifier"]}, job=True)
-
-        # step 8
-        assert call("disk.gptid_from_part_type", to_replace_disk["devname"], call("disk.get_zfs_part_type"))
-
-        # step 9
-        found = False
-        for _ in range(10):
-            if not found:
-                for i in call("pool.flatten_topology", call("pool.get_instance", pool["id"])["topology"]):
-                    if i["type"] == "DISK" and i["disk"] == new_disk["devname"]:
-                        found = True
-                        break
-                else:
-                    sleep(1)
-
-        assert found, f'Failed to detect replacement disk {new_disk["devname"]!r} in zpool {pool["name"]!r}'
-
-        # step 10 (NOTE: disk.sync_all takes a while so we retry a few times here)
-        for _ in range(30):
-            cmd = ("disk.get_instance", new_disk["identifier"], {"extra": {"pools": True}})
-            if call(*cmd)["pool"] == pool["name"]:
-                break
-            else:
-                sleep(1)
-        else:
-            assert False, f"{' '.join(cmd)} failed to update with pool information"
diff --git a/tests/api2/test_pool_replace_disk_settings_description.py b/tests/api2/test_pool_replace_disk_settings_description.py
deleted file mode 100644
index 842f8e9217ec4..0000000000000
--- a/tests/api2/test_pool_replace_disk_settings_description.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import _2_disk_mirror_topology, another_pool
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def pool():
-    with another_pool(topology=_2_disk_mirror_topology) as pool:
-        yield pool
-
-
-@pytest.mark.parametrize("preserve_description", [True, False])
-def test_pool_replace_disk(pool, preserve_description):
-    pool = call("pool.get_instance", pool["id"])
-    flat_top = call("pool.flatten_topology", pool["topology"])
-    pool_top = [vdev for vdev in flat_top if vdev["type"] == "DISK"]
-
-    to_replace_vdev = pool_top[0]
-    to_replace_disk = call("disk.query", [["name", "=", to_replace_vdev["disk"]]], {"get": True})
-    new_disk = call("disk.get_unused")[0]
-
-    call("disk.update", to_replace_disk["identifier"], {"description": "Preserved disk description"})
-    call("disk.update", new_disk["identifier"], {"description": "Unchanged disk description"})
-
-    call("pool.replace", pool["id"], {
-        "label": to_replace_vdev["guid"],
-        "disk": new_disk["identifier"],
-        "force": True,
-        "preserve_description": preserve_description,
-    }, job=True)
-
-    new_disk = call("disk.get_instance", new_disk["identifier"])
-    if preserve_description:
-        assert new_disk["description"] == "Preserved disk description"
-    else:
-        assert new_disk["description"] == "Unchanged disk description"
diff --git a/tests/api2/test_pool_resilver.py b/tests/api2/test_pool_resilver.py
deleted file mode 100644
index 43bb45a58587c..0000000000000
--- a/tests/api2/test_pool_resilver.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from middlewared.test.integration.utils import call
-
-
-def test_pool_resilver_update():
-    resilver = {
-        "enabled": False,
-        "begin": "18:00",
-        "end": "09:00",
-        "weekday": [1, 2, 3, 4, 5, 6, 7],
-    }
-
-    assert call("pool.resilver.update", resilver).items() > resilver.items()
diff --git a/tests/api2/test_pool_scrub.py b/tests/api2/test_pool_scrub.py
deleted file mode 100644
index ce5650dfea4dc..0000000000000
--- a/tests/api2/test_pool_scrub.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import errno
-
-import pytest
-
-from auto_config import pool_name
-from middlewared.service_exception import ValidationError, ValidationErrors
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def scrub_info():
-    for i in call("pool.scrub.query", [["pool_name", "=", pool_name]]):
-        return i
-    else:
-        # by default, on pool creation a scrub task is created
-        assert False, f"Failed to find scrub job for {pool_name!r}"
-
-
-def test_create_duplicate_scrub_fails(scrub_info):
-    with pytest.raises(ValidationErrors) as ve:
-        call(
-            "pool.scrub.create",
-            {
-                "pool": scrub_info["pool"],
-                "threshold": 1,
-                "description": "",
-                "schedule": {
-                    "minute": "00",
-                    "hour": "00",
-                    "dom": "1",
-                    "month": "1",
-                    "dow": "1",
-                },
-                "enabled": True,
-            },
-        )
-    assert ve.value.errors == [
-        ValidationError(
-            "pool_scrub_create.pool",
-            "A scrub with this pool already exists",
-            errno.EINVAL,
-        )
-    ]
-
-
-def test_update_scrub(scrub_info):
-    assert call(
-        "pool.scrub.update",
-        scrub_info["id"],
-        {
-            "threshold": 2,
-            "description": "",
-            "schedule": {
-                "minute": "00",
-                "hour": "00",
-                "dom": "1",
-                "month": "1",
-                "dow": "1",
-            },
-            "enabled": True,
-        },
-    )
-
-
-def test_delete_scrub(scrub_info):
-    call("pool.scrub.delete", scrub_info["id"])
-    assert call("pool.scrub.query", [["pool_name", "=", pool_name]]) == []
-
-
-def test_create_scrub(scrub_info):
-    assert call(
-        "pool.scrub.create",
-        {
-            "pool": scrub_info["pool"],
-            "threshold": 1,
-            "description": "",
-            "schedule": {
-                "minute": "00",
-                "hour": "00",
-                "dom": "1",
-                "month": "1",
-                "dow": "1",
-            },
-            "enabled": True,
-        },
-    )
diff --git a/tests/api2/test_pool_spare.py b/tests/api2/test_pool_spare.py
deleted file mode 100644
index ff128da74423a..0000000000000
--- a/tests/api2/test_pool_spare.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import pytest
-
-from truenas_api_client import ValidationErrors
-from middlewared.test.integration.assets.disk import fake_disks
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call
-
-
-def test_pool_create_too_small_spare():
-    disk = call("disk.get_unused")[0]["name"]
-
-    with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
-        with pytest.raises(ValidationErrors) as ve:
-            pool = call("pool.create", {
-                "name": "test",
-                "encryption": False,
-                "allow_duplicate_serials": True,
-                "topology": {
-                    "data": [
-                        {"type": "STRIPE", "disks": [disk]},
-                    ],
-                    "spares": ["sdz"],
-                },
-            }, job=True)
-            call("pool.export", pool["id"], job=True)
-
-        assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
-
-
-def test_pool_update_too_small_spare():
-    with another_pool() as pool:
-        with fake_disks({"sdz": {"size": 1024 * 1024 * 1024}}):
-            with pytest.raises(ValidationErrors) as ve:
-                call("pool.update", pool["id"], {
-                    "topology": {
-                        "spares": ["sdz"],
-                    },
-                }, job=True)
-
-            assert ve.value.errors[0].errmsg.startswith("Spare sdz (1 GiB) is smaller than the smallest data disk")
diff --git a/tests/api2/test_port_delegates.py b/tests/api2/test_port_delegates.py
deleted file mode 100644
index a107c8814527e..0000000000000
--- a/tests/api2/test_port_delegates.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import pytest
-import sys
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-
-
-PAYLOAD = (
-    ('ftp.config', 'ftp.update', ['port'], {}),
-)
-
-
-@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
-def test_port_delegate_validation_with_invalid_ports(config_method, method, keys, payload):
-    in_use_ports = []
-    namespace = config_method.rsplit('.', 1)[0]
-    for entry in call('port.get_in_use'):
-        in_use_ports.extend(filter(lambda i: i[1] > 1024 and entry['namespace'] != namespace, entry['ports']))
-
-    assert in_use_ports != [], 'No in use ports retrieved'
-
-    for index, key in enumerate(keys):
-        payload[key] = in_use_ports[index][1] if len(in_use_ports) > index else in_use_ports[0][1]
-
-    with pytest.raises(ValidationErrors) as ve:
-        call(method, payload)
-
-    assert any('The port is being used by' in error.errmsg for error in ve.value.errors) is True, ve
-
-
-@pytest.mark.parametrize('config_method,method,keys,payload', PAYLOAD)
-def test_port_delegate_validation_with_valid_ports(config_method, method, keys, payload):
-    in_use_ports = []
-    for entry in call('port.get_in_use'):
-        in_use_ports.extend(entry['ports'])
-
-    assert in_use_ports != [], 'No in use ports retrieved'
-
-    validation_error = None
-    old_config = call(config_method)
-    to_restore_config = {}
-    used_ports = []
-    for key in keys:
-        port = next(i for i in range(20000, 60000) if i not in in_use_ports and i not in used_ports)
-        payload[key] = port
-        used_ports.append(port)
-        to_restore_config[key] = old_config[key]
-
-    try:
-        call(method, payload)
-    except ValidationErrors as ve:
-        validation_error = ve
-    else:
-        call(method, to_restore_config)
-
-    assert validation_error is None, f'No validation exception expected: {validation_error}'
diff --git a/tests/api2/test_posix_acl.py b/tests/api2/test_posix_acl.py
deleted file mode 100644
index a7b11d4fcf01c..0000000000000
--- a/tests/api2/test_posix_acl.py
+++ /dev/null
@@ -1,384 +0,0 @@
-import enum
-
-import pytest
-
-from auto_config import pool_name
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.assets.pool import dataset
-
-ACLTEST_DATASET_NAME = "posixacltest"
-ACLTEST_DATASET_ABS_PATH = f"/mnt/{pool_name}/{ACLTEST_DATASET_NAME}"
-ACLTEST_SUBDATASET_NAME = "sub1"
-ACLTEST_SUBDATASET_ABS_PATH = f"{ACLTEST_DATASET_ABS_PATH}/{ACLTEST_SUBDATASET_NAME}"
-PERMSET_EMPTY = {"READ": False, "WRITE": False, "EXECUTE": False}
-PERMSET_FULL = {"READ": True, "WRITE": True, "EXECUTE": True}
-TAGS = {
-    "USER_OBJ": {"mask_required": False},
-    "GROUP_OBJ": {"mask_required": False},
-    "MASK": {"mask_required": False},
-    "USER": {"mask_required": True},
-    "GROUP": {"mask_required": True},
-    "OTHER": {"mask_required": False},
-}
-
-
-class ACLBrand(enum.Enum):
-    ACCESS = enum.auto()
-    DEFAULT = enum.auto()
-
-    def getacl(self, perms=None):
-        """Default to 770 unless permissions explicitly specified."""
-        permfull = perms if perms else PERMSET_FULL.copy()
-        permempty = perms if perms else PERMSET_EMPTY.copy()
-        default = self.name == "DEFAULT"
-        return [
-            {
-                "tag": "USER_OBJ",
-                "id": -1,
-                "perms": permfull,
-                "default": default,
-                "who": None,
-            },
-            {
-                "tag": "GROUP_OBJ",
-                "id": -1,
-                "perms": permfull,
-                "default": default,
-                "who": None,
-            },
-            {
-                "tag": "OTHER",
-                "id": -1,
-                "perms": permempty,
-                "default": default,
-                "who": None,
-            },
-        ]
-
-
-@pytest.fixture(scope="module")
-def temp_ds():
-    with dataset(
-        ACLTEST_DATASET_NAME, data={"acltype": "POSIX", "aclmode": "DISCARD"}
-    ) as ds:
-        # Verify that our dataset was created successfully
-        # and that the acltype is POSIX1E, which should be
-        # the default for a "generic" dataset.
-        info = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
-        assert info["acltype"] == "POSIX1E", info
-
-        # Verify that we can set a trivial POSIX1E ACL
-        call(
-            "filesystem.setacl",
-            {
-                "path": ACLTEST_DATASET_ABS_PATH,
-                "dacl": ACLBrand.ACCESS.getacl(),
-                "gid": 65534,
-                "uid": 65534,
-                "acltype": "POSIX1E",
-            },
-            job=True,
-        )
-
-        # Verify ACL is reported as trivial
-        info = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
-        assert info["trivial"], info
-
-        # Verify UID/GID
-        assert info["uid"] == 65534, info
-        assert info["gid"] == 65534, info
-
-        # Verify ACL was applied correctly
-        default_acl = ACLBrand.ACCESS.getacl()
-        for idx, acl in enumerate(info["acl"]):
-            for key in ("tag", "perms"):
-                assert acl[key] == default_acl[idx][key], acl[key]
-
-        # create subdataset for inheritance related tests
-        call(
-            "pool.dataset.create",
-            {
-                "name": f"{ds}/{ACLTEST_SUBDATASET_NAME}",
-                "acltype": "POSIX",
-                "aclmode": "DISCARD",
-            },
-        )
-        rv = ssh(
-            "; ".join(
-                [
-                    f"mkdir -p {ACLTEST_DATASET_ABS_PATH}/dir1/dir2",
-                    f"touch {ACLTEST_DATASET_ABS_PATH}/dir1/testfile",
-                    f"touch {ACLTEST_DATASET_ABS_PATH}/dir1/dir2/testfile",
-                ]
-            ),
-            complete_response=True,
-        )
-        assert rv["result"] is True, rv["output"]
-
-        yield
-
-
-"""
-At this point, very basic functionality of the API endpoint is verified.
-Proceed to more rigorous testing of permissions.
-"""
-
-
-@pytest.mark.parametrize("perm", ["READ", "WRITE", "EXECUTE"])
-def test_set_perms_for(temp_ds, perm):
-    """
-    Validation that READ, WRITE, and EXECUTE are set correctly via the endpoint.
-    The OTHER entry is used for this purpose.
-    """
-    dacls = ACLBrand.ACCESS.getacl()
-    dacls[2]["perms"][perm] = True
-    call(
-        "filesystem.setacl",
-        {"path": ACLTEST_DATASET_ABS_PATH, "dacl": dacls, "acltype": "POSIX1E"},
-        job=True,
-    )
-    rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)["acl"][2]["perms"]
-    assert rv[perm], rv
-
-
-@pytest.mark.parametrize("tag", TAGS.keys())
-def test_set_tag_(temp_ds, tag):
-    """
-    Validation that entries for all tag types can be set correctly.
-    In the case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry
-    is modified to match our test permset. USER and GROUP (named)
-    entries are set for id 1000 (user / group need not exist for
-    this to succeed). Named entries require an additional mask entry.
-    """
-    test_permset = {"READ": True, "WRITE": False, "EXECUTE": True}
-    must_add = True
-    payload = {
-        "path": ACLTEST_DATASET_ABS_PATH,
-        "dacl": ACLBrand.ACCESS.getacl(),
-        "acltype": "POSIX1E",
-    }
-    for entry in payload["dacl"]:
-        if entry["tag"] == tag:
-            entry["perms"] = test_permset
-            must_add = False
-            break
-
-    if must_add:
-        new_entry = {
-            "tag": tag,
-            "perms": test_permset,
-            "id": 1000,
-            "default": False,
-            "who": None,
-        }
-        if tag == "MASK":
-            new_entry["id"] = -1
-            # POSIX ACLs are quite particular about
-            # ACE ordering. We do this on the backend.
-            # MASK comes before OTHER.
-            payload["dacl"].insert(2, new_entry)
-        elif tag == "USER":
-            payload["dacl"].insert(1, new_entry)
-        elif tag == "GROUP":
-            payload["dacl"].insert(2, new_entry)
-
-    if TAGS[tag]["mask_required"]:
-        new_entry = {
-            "tag": "MASK",
-            "perms": test_permset,
-            "id": -1,
-            "default": False,
-            "who": None,
-        }
-        payload["dacl"].insert(3, new_entry)
-
-    call("filesystem.setacl", payload, job=True)
-    rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
-    assert payload["dacl"] == rv["acl"], rv
-
-
-@pytest.mark.parametrize("tag", TAGS.keys())
-def test_set_default_tag_(temp_ds, tag):
-    """
-    Validation that entries for all tag types can be set correctly.
-    In the case of USER_OBJ, GROUP_OBJ, and OTHER, the existing entry
-    is modified to match our test permset. USER and GROUP (named)
-    entries are set for id 1000 (user / group need not exist for
-    this to succeed). Named entries require an additional mask entry.
-    This particular test covers "default" entries in POSIX1E ACL.
-    """
-    test_permset = {"READ": True, "WRITE": False, "EXECUTE": True}
-    must_add = True
-    payload = {
-        "path": ACLTEST_DATASET_ABS_PATH,
-        "dacl": ACLBrand.ACCESS.getacl(),
-        "acltype": "POSIX1E",
-    }
-    default = ACLBrand.DEFAULT.getacl()
-    for entry in default:
-        if entry["tag"] == tag:
-            entry["perms"] = test_permset
-            must_add = False
-
-    if must_add:
-        new_entry = {
-            "tag": tag,
-            "perms": test_permset,
-            "id": 1000,
-            "default": True,
-            "who": None,
-        }
-        if tag == "MASK":
-            new_entry["id"] = -1
-            # POSIX ACLs are quite particular about
-            # ACE ordering. We do this on the backend.
-            # MASK comes before OTHER.
-            default.insert(2, new_entry)
-        elif tag == "USER":
-            default.insert(1, new_entry)
-        elif tag == "GROUP":
-            default.insert(2, new_entry)
-
-    if TAGS[tag]["mask_required"]:
-        new_entry = {
-            "tag": "MASK",
-            "perms": test_permset,
-            "id": -1,
-            "default": True,
-            "who": None,
-        }
-        default.insert(3, new_entry)
-
-    payload["dacl"].extend(default)
-    call("filesystem.setacl", payload, job=True)
-    rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
-    assert payload["dacl"] == rv["acl"], rv
-    assert rv["trivial"] is False, rv
-
-
-def test_non_recursive_acl_strip(temp_ds):
-    """
-    Verify that non-recursive ACL strip works correctly.
-    We do this by checking the result of a subsequent getacl
-    request on the path (it should report that it is "trivial").
-    """
-    call(
-        "filesystem.setacl",
-        {
-            "path": ACLTEST_DATASET_ABS_PATH,
-            "dacl": [],
-            "acltype": "POSIX1E",
-            "options": {"stripacl": True},
-        },
-        job=True,
-    )
-    rv = call("filesystem.getacl", ACLTEST_DATASET_ABS_PATH)
-    assert rv["trivial"] is True, rv
-
-
-"""
-This next series of tests verifies that ACLs are being inherited correctly.
-We first create a child dataset to verify that ACLs do not change unless
-'traverse' is set.
-"""
-
-
-def test_recursive_no_traverse(temp_ds):
-    """
-    Test that the ACL is recursively applied correctly, but does
-    not affect the mountpoint of the child dataset.
-
-    In this case, the access ACL will have 750 for the dataset mountpoint,
-    and the default ACL will have 777. Recursively applying will grant
-    777 for access and default.
-    """
-    payload = {
-        "path": ACLTEST_DATASET_ABS_PATH,
-        "gid": 65534,
-        "uid": 65534,
-        "dacl": ACLBrand.ACCESS.getacl(),
-        "acltype": "POSIX1E",
-        "options": {"recursive": True},
-    }
-    new_perms = {"READ": True, "WRITE": True, "EXECUTE": True}
-    default = ACLBrand.DEFAULT.getacl(new_perms)
-    payload["dacl"].extend(default)
-    call("filesystem.setacl", payload, job=True)
-
-    # Verify that subdataset hasn't changed. Should still report as trivial.
-    rv = call("filesystem.getacl", ACLTEST_SUBDATASET_ABS_PATH)
-    assert rv["trivial"], rv
-
-    # Verify that user was changed on subdirectory
-    rv = call("filesystem.getacl", f"{ACLTEST_DATASET_ABS_PATH}/dir1")
-    assert rv["uid"] == 65534, rv
-    assert rv["trivial"] is False, rv
-    for entry in rv["acl"]:
-        assert entry["perms"] == new_perms, rv["acl"]
-
-
-def test_recursive_with_traverse(temp_ds):
-    """
-    This test verifies that setting `traverse = True`
-    will allow the setacl operation to cross mountpoints.
-    """
-    payload = {
-        "gid": 65534,
-        "uid": 65534,
-        "path": ACLTEST_DATASET_ABS_PATH,
-        "dacl": ACLBrand.ACCESS.getacl(),
-        "acltype": "POSIX1E",
-        "options": {"recursive": True, "traverse": True},
-    }
-    default = ACLBrand.DEFAULT.getacl({"READ": True, "WRITE": True, "EXECUTE": True})
-    payload["dacl"].extend(default)
-    call("filesystem.setacl", payload, job=True)
-    rv = call("filesystem.getacl", ACLTEST_SUBDATASET_ABS_PATH)
-    assert rv["trivial"] is False, rv
-    assert rv["uid"] == 65534, rv
-
-
-def test_strip_acl_from_dataset(temp_ds):
-    """
-    Strip the ACL via the filesystem.setperm endpoint.
-    This should work even for POSIX1E ACLs.
-    """
-    call(
-        "filesystem.setperm",
-        {
-            "path": ACLTEST_DATASET_ABS_PATH,
-            "mode": "777",
-            "options": {"stripacl": True, "recursive": True},
-        },
-        job=True,
-    )
-
-
-"""
-The next four tests check that we've removed the ACL from the
-mountpoint, a subdirectory, and a file. These are all potentially
-different cases where we can fail to strip an ACL.
-"""
-
-
-def test_filesystem_acl_is_not_removed_child_dataset(temp_ds):
-    rv = call("filesystem.stat", ACLTEST_SUBDATASET_ABS_PATH)
-    assert rv["acl"] is True, rv
-
-
-def test_filesystem_acl_is_removed_from_mountpoint(temp_ds):
-    rv = call("filesystem.stat", ACLTEST_DATASET_ABS_PATH)
-    assert rv["acl"] is False, rv
-    assert oct(rv["mode"]) == "0o40777", rv
-
-
-def test_filesystem_acl_is_removed_from_subdir(temp_ds):
-    rv = call("filesystem.stat", f"{ACLTEST_DATASET_ABS_PATH}/dir1")
-    assert rv["acl"] is False, rv
-    assert oct(rv["mode"]) == "0o40777", rv
-
-
-def test_filesystem_acl_is_removed_from_file(temp_ds):
-    rv = call("filesystem.stat", f"{ACLTEST_DATASET_ABS_PATH}/dir1/testfile")
-    assert rv["acl"] is False, rv
-    assert oct(rv["mode"]) == "0o100777", rv
diff --git a/tests/api2/test_quotas.py b/tests/api2/test_quotas.py
deleted file mode 100644
index 23d24eb4b151b..0000000000000
--- a/tests/api2/test_quotas.py
+++ /dev/null
@@ -1,118 +0,0 @@
-from dataclasses import dataclass
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-
-
-@dataclass(frozen=True)
-class QuotaConfig:
-    # user quota value
-    uq_value: int = 1000000
-    # group quota value
-    gq_value: int = uq_value * 2
-    # dataset quota value
-    dq_value: int = gq_value + 10000
-    # dataset refquota value
-    drq_value: int = dq_value + 10000
-    # temp dataset name
-    ds_name: str = 'temp_quota_ds_name'
-
-
-@pytest.fixture(scope='module')
-def temp_ds():
-    with dataset(QuotaConfig.ds_name) as ds:
-        yield ds
-
-
-@pytest.fixture(scope='module')
-def temp_user(temp_ds):
-    user_info = {
-        'username': 'test_quota_user',
-        'full_name': 'Test Quota User',
-        'password': 'test1234',
-        'group_create': True,
-    }
-    with user(user_info) as u:
-        uid = call('user.get_instance', u['id'])['uid']
-        grp = call('group.query', [['group', '=', u['username']]], {'get': True})
-        yield {'uid': uid, 'gid': grp['gid'], 'user': u['username'], 'group': grp['group']}
-
-
-@pytest.mark.parametrize('id_', ['0', 'root'])
-@pytest.mark.parametrize(
-    'quota_type,error', [
-        (['USER', 'user quota on uid']),
-        (['USEROBJ', 'userobj quota on uid']),
-        (['GROUP', 'group quota on gid']),
-        (['GROUPOBJ', 'groupobj quota on gid']),
-    ],
-    ids=[
-        'root USER quota is prohibited',
-        'root USEROBJ quota is prohibited',
-        'root GROUP quota is prohibited',
-        'root GROUPOBJ quota is prohibited',
-    ],
-)
-def test_error(temp_ds, id_, quota_type, error):
-    """Changing any quota type for the root user/group should be prohibited"""
-    with pytest.raises(ValidationErrors) as ve:
-        call('pool.dataset.set_quota', temp_ds, [{'quota_type': quota_type, 'id': id_, 'quota_value': 5242880}])
-    assert ve.value.errors[0].errmsg == f'Setting {error} [0] is not permitted'
-
-
-def test_quotas(temp_ds, temp_user):
-    user, uid = temp_user['user'], temp_user['uid']
-    group, gid = temp_user['group'], temp_user['gid']
-    uq_value = QuotaConfig.uq_value
-    gq_value = QuotaConfig.gq_value
-    dq_value = QuotaConfig.dq_value
-    drq_value = QuotaConfig.drq_value
-
-    call('pool.dataset.set_quota', temp_ds, [
-        {'quota_type': 'USER', 'id': user, 'quota_value': uq_value},
-        {'quota_type': 'USEROBJ', 'id': user, 'quota_value': uq_value},
-        {'quota_type': 'GROUP', 'id': group, 'quota_value': gq_value},
-        {'quota_type': 'GROUPOBJ', 'id': group, 'quota_value': gq_value},
-        {'quota_type': 'DATASET', 'id': 'QUOTA', 'quota_value': dq_value},
-        {'quota_type': 'DATASET', 'id': 'REFQUOTA', 'quota_value': drq_value},
-    ])
-
-    verify_info = (
-        (
-            {
-                'quota_type': 'USER',
-                'id': uid,
-                'quota': uq_value,
-                'obj_quota': uq_value,
-                'name': user
-            },
-            'USER',
-        ),
-        (
-            {
-                'quota_type': 'GROUP',
-                'id': gid,
-                'quota': gq_value,
-                'obj_quota': gq_value,
-                'name': group
-            },
-            'GROUP',
-        ),
-        (
-            {
-                'quota_type': 'DATASET',
-                'id': temp_ds,
-                'name': temp_ds,
-                'quota': dq_value,
-                'refquota': drq_value,
-            },
-            'DATASET',
-        ),
-    )
-    for er, quota_type in verify_info:
-        for result in filter(lambda x: x['id'] == er['id'], call('pool.dataset.get_quota', temp_ds, quota_type)):
-            assert all((result[j] == er[j] for j in er)), result
diff --git a/tests/api2/test_rate_limit.py b/tests/api2/test_rate_limit.py
deleted file mode 100644
index 15eafc7d76f5b..0000000000000
--- a/tests/api2/test_rate_limit.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.test.integration.utils import call, client
-
-NOAUTH_METHOD = 'system.boot_id'
-SEP = '_##_'
-
-
-def test_unauth_requests_are_rate_limited():
-    """Test that the truenas server rate limits a caller
-    that is hammering an endpoint that requires no authentication."""
-    with client(auth=None) as c:
-        for i in range(1, 22):
-            if i <= 20:
-                # default is 20 calls within 60 second timeframe
-                assert c.call(NOAUTH_METHOD)
-            else:
-                with pytest.raises(Exception) as ve:
-                    # on 21st call within 60 seconds, rate limit kicks in
-                    c.call(NOAUTH_METHOD)
-                assert ve.value.errno == errno.EBUSY
-
-    """Test that middleware's rate limit plugin for interacting
-    with the global cache behaves as intended."""
-    cache = call('rate.limit.cache_get')
-    # the mechanism by which the rate limit chooses a unique key
-    # for insertion into the dictionary is to use the API endpoint
-    # name as part of the string
-    assert any((NOAUTH_METHOD in i for i in cache)), cache
-
-    # now let's pop the last entry of the cache
-    len_cache_before_pop = len(cache)
-    popped_method, popped_ip = list(cache)[-1].split(SEP)
-    call('rate.limit.cache_pop', popped_method, popped_ip)
-    new_cache = call('rate.limit.cache_get')
-    assert len(new_cache) != len_cache_before_pop, new_cache
-
-    # finally, let's clear the cache
-    call('rate.limit.cache_clear')
-    new_new_cache = call('rate.limit.cache_get')
-    assert len(new_new_cache) == 0, new_new_cache
-
-
-@pytest.mark.parametrize('method_name', [NOAUTH_METHOD, 'system.host_id'])
-def test_authorized_requests_are_not_rate_limited(method_name):
-    """Test that the truenas server does NOT rate limit a caller
-    that hammers an endpoint when said caller has been authenticated"""
-    for i in range(1, 22):
-        assert call(method_name)
diff --git a/tests/api2/test_replication.py b/tests/api2/test_replication.py
deleted file mode 100644
index 1b6e49c635f94..0000000000000
--- a/tests/api2/test_replication.py
+++ /dev/null
@@ -1,243 +0,0 @@
-import contextlib
-import random
-import string
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.keychain import localhost_ssh_credentials
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.replication import replication_task
-from middlewared.test.integration.assets.snapshot_task import snapshot_task
-from middlewared.test.integration.utils import call, pool, ssh
-
-
-BASE_REPLICATION = {
-    "direction": "PUSH",
-    "transport": "LOCAL",
-    "source_datasets": ["data"],
-    "target_dataset": "data",
-    "recursive": False,
-    "auto": False,
-    "retention_policy": "NONE",
-}
-
-
-@pytest.fixture(scope="module")
-def ssh_credentials():
-    with localhost_ssh_credentials(username="root") as c:
-        yield c
-
-
-@pytest.fixture(scope="module")
-def periodic_snapshot_tasks():
-    result = {}
-    with contextlib.ExitStack() as stack:
-        for k, v in {
-            "data-recursive": {
-                "dataset": "tank/data",
-                "recursive": True,
-                "lifetime_value": 1,
-                "lifetime_unit": "WEEK",
-                "naming_schema": "auto-%Y%m%d.%H%M%S-1w",
-                "schedule": {},
-            },
-            "data-work-nonrecursive": {
-                "dataset": "tank/data/work",
-                "recursive": False,
-                "lifetime_value": 1,
-                "lifetime_unit": "WEEK",
-                "naming_schema": "auto-%Y%m%d.%H%M%S-1w",
-                "schedule": {},
-            },
-
-            "exclude": {
-                "dataset": "tank/exclude",
-                "recursive": True,
-                "exclude": ["tank/exclude/work/garbage"],
-                "lifetime_value": 1,
-                "lifetime_unit": "WEEK",
-                "naming_schema": "snap-%Y%m%d-%H%M-1w",
-                "schedule": {},
-            },
-        }.items():
-            stack.enter_context(dataset(v["dataset"].removeprefix("tank/")))
-            result[k] = stack.enter_context(snapshot_task(v))
-
-        yield result
-
-
-@pytest.mark.parametrize("req,error", [
-    # Push + naming-schema
-    (dict(naming_schema=["snap-%Y%m%d-%H%M-1m"]), "naming_schema"),
-
-    # Auto with both periodic snapshot task and schedule
-    (dict(periodic_snapshot_tasks=["data-recursive"], schedule={"minute": "*/2"}, auto=True), None),
-    # Auto with periodic snapshot task
-    (dict(periodic_snapshot_tasks=["data-recursive"], auto=True), None),
-    # Auto with schedule
-    (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], schedule={"minute": "*/2"}, auto=True), None),
-    # Auto without periodic snapshot task or schedule
-    (dict(also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"], auto=True), "auto"),
-
-    # Pull + periodic snapshot tasks
-    (dict(direction="PULL", periodic_snapshot_tasks=["data-recursive"]), "periodic_snapshot_tasks"),
-    # Pull with naming schema
-    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"]), None),
-    # Pull + also_include_naming_schema
-    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], also_include_naming_schema=["snap-%Y%m%d-%H%M-2m"]),
-     "also_include_naming_schema"),
-    # Pull + hold_pending_snapshots
-    (dict(direction="PULL", naming_schema=["snap-%Y%m%d-%H%M-1w"], hold_pending_snapshots=True),
-     "hold_pending_snapshots"),
-
-    # SSH+Netcat
-    (dict(periodic_snapshot_tasks=["data-recursive"],
-          transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=1024,
-          netcat_active_side_port_max=50000),
-     None),
-    # Bad netcat_active_side_port_max
-    (dict(transport="SSH+NETCAT", ssh_credentials=True, netcat_active_side="LOCAL", netcat_active_side_port_min=60000,
-          netcat_active_side_port_max=50000),
-     "netcat_active_side_port_max"),
-    # SSH+Netcat + compression
-    (dict(transport="SSH+NETCAT", compression="LZ4"), "compression"),
-    # SSH+Netcat + speed limit
-    (dict(transport="SSH+NETCAT", speed_limit=1024), "speed_limit"),
-
-    # Does not exclude garbage
-    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True), "exclude"),
-    # Does not exclude garbage
-    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True,
-          exclude=["tank/exclude/work/garbage"]),
-     None),
-    # May not exclude if not recursive
-    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False), None),
-    # Can't replicate excluded dataset
-    (dict(source_datasets=["tank/exclude/work/garbage"], periodic_snapshot_tasks=["exclude"]),
-     "source_datasets.0"),
-
-    # Non-recursive exclude
-    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=False,
-          exclude=["tank/exclude/work/garbage"]),
-     "exclude"),
-
-    # Unrelated exclude
-    (dict(source_datasets=["tank/exclude/work"], periodic_snapshot_tasks=["exclude"], recursive=True,
-          exclude=["tank/data"]),
-     "exclude.0"),
-
-    # Does not require unrelated exclude
-    (dict(source_datasets=["tank/exclude/work/important"], periodic_snapshot_tasks=["exclude"], recursive=True),
-     None),
-
-    # Custom retention policy
-    (dict(periodic_snapshot_tasks=["data-recursive"],
-          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), None),
-
-    # Complex custom retention policy
-    (dict(periodic_snapshot_tasks=["data-recursive"],
-          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK", lifetimes=[
-              dict(schedule={"hour": "0"}, lifetime_value=30, lifetime_unit="DAY"),
-              dict(schedule={"hour": "0", "dow": "1"}, lifetime_value=1, lifetime_unit="YEAR"),
-          ]), None),
-
-    # name_regex
-    (dict(name_regex="manual-.+"), None),
-    (dict(direction="PULL", name_regex="manual-.+"), None),
-    (dict(name_regex="manual-.+",
-          retention_policy="CUSTOM", lifetime_value=2, lifetime_unit="WEEK"), "retention_policy"),
-
-    # replicate
-    (dict(source_datasets=["tank/data", "tank/data/work"], periodic_snapshot_tasks=["data-recursive"], replicate=True,
-          recursive=True, properties=True),
-     "source_datasets.1"),
-    (dict(source_datasets=["tank/data"], periodic_snapshot_tasks=["data-recursive", "data-work-nonrecursive"],
-          replicate=True, recursive=True, properties=True),
-     "periodic_snapshot_tasks.1"),
-])
-def test_create_replication(ssh_credentials, periodic_snapshot_tasks, req, error):
-    if "ssh_credentials" in req:
-        req["ssh_credentials"] = ssh_credentials["credentials"]["id"]
-
-    if "periodic_snapshot_tasks" in req:
-        req["periodic_snapshot_tasks"] = [periodic_snapshot_tasks[k]["id"] for k in req["periodic_snapshot_tasks"]]
-
-    name = "".join(random.choice(string.ascii_letters) for _ in range(64))
-    data = dict(BASE_REPLICATION, name=name, **req)
-
-    if error:
-        with pytest.raises(ValidationErrors) as ve:
-            with replication_task(data):
-                pass
-
-        assert any(e.attribute == f"replication_create.{error}" for e in ve.value.errors)
-    else:
-        with replication_task(data) as replication:
-            restored = call("replication.restore", replication["id"], {
-                "name": f"restore {name}",
-                "target_dataset": "data/restore",
-            })
-            call("replication.delete", restored["id"])
-
-
-@pytest.mark.parametrize("data,path,include", [
-    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/", True),
-    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child", True),
-    ({"direction": "PUSH", "source_datasets": ["data/child"]}, "/mnt/data/child/work", False),
-    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data", True),
-    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child", True),
-    ({"direction": "PULL", "target_dataset": "data/child"}, "/mnt/data/child/work", False),
-])
-def test_query_attachment_delegate(ssh_credentials, data, path, include):
-    data = {
-        "name": "Test",
-        "transport": "SSH",
-        "source_datasets": ["source"],
-        "target_dataset": "target",
-        "recursive": False,
-        "name_regex": ".+",
-        "auto": False,
-        "retention_policy": "NONE",
-        **data,
-    }
-    if data["transport"] == "SSH":
-        data["ssh_credentials"] = ssh_credentials["credentials"]["id"]
-
-    with replication_task(data) as t:
-        result = call("pool.dataset.query_attachment_delegate", "replication", path, True)
-        if include:
-            assert len(result) == 1
-            assert result[0]["id"] == t["id"]
-        else:
-            assert len(result) == 0
-
-
-@pytest.mark.parametrize("exclude_mountpoint_property", [True, False])
-def test_run_onetime__exclude_mountpoint_property(exclude_mountpoint_property):
-    with dataset("src") as src:
-        with dataset("src/legacy") as src_legacy:
-            ssh(f"zfs set mountpoint=legacy {src_legacy}")
-            ssh(f"zfs snapshot -r {src}@2022-01-01-00-00-00")
-
-            try:
-                call("replication.run_onetime", {
-                    "direction": "PUSH",
-                    "transport": "LOCAL",
-                    "source_datasets": [src],
-                    "target_dataset": f"{pool}/dst",
-                    "recursive": True,
-                    "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
-                    "retention_policy": "NONE",
-                    "replicate": True,
-                    "readonly": "IGNORE",
-                    "exclude_mountpoint_property": exclude_mountpoint_property
-                }, job=True)
-
-                mountpoint = ssh(f"zfs get -H -o value mountpoint {pool}/dst/legacy").strip()
-                if exclude_mountpoint_property:
-                    assert mountpoint == f"/mnt/{pool}/dst/legacy"
-                else:
-                    assert mountpoint == "legacy"
-            finally:
-                ssh(f"zfs destroy -r {pool}/dst", check=False)
diff --git a/tests/api2/test_replication_role.py b/tests/api2/test_replication_role.py
deleted file mode 100644
index 8543b5baca0ca..0000000000000
--- a/tests/api2/test_replication_role.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.replication import replication_task
-
-
-@pytest.mark.parametrize("has_pull", [False, True])
-def test_create_pull_replication(has_pull):
-    with dataset("src") as src:
-        with dataset("dst") as dst:
-            payload = {
-                "name": "Test",
-                "direction": "PULL",
-                "transport": "LOCAL",
-                "source_datasets": [src],
-                "target_dataset": dst,
-                "recursive": True,
-                "naming_schema": ["%Y-%m-%d-%H-%M-%S"],
-                "retention_policy": "NONE",
-                "auto": False,
-            }
-
-            if has_pull:
-                role = "REPLICATION_TASK_WRITE_PULL"
-            else:
-                role = "REPLICATION_TASK_WRITE"
-            with unprivileged_user_client([role]) as c:
-                if has_pull:
-                    task = c.call("replication.create", payload)
-                    c.call("replication.delete", task["id"])
-                else:
-                    with pytest.raises(ValidationErrors) as ve:
-                        c.call("replication.create", payload)
-
-                    assert ve.value.errors[0].attribute == "replication_create.direction"
-
-
-@pytest.mark.parametrize("has_pull", [False, True])
-def test_update_pull_replication(has_pull):
-    with dataset("src") as src:
-        with dataset("dst") as dst:
-            with replication_task({
-                "name": "Test",
-                "direction": "PUSH",
-                "transport": "LOCAL",
-                "source_datasets": [src],
-                "target_dataset": dst,
-                "recursive": True,
-                "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
-                "retention_policy": "NONE",
-                "auto": False,
-            }) as t:
-                payload = {
-                    "direction": "PULL",
-                    "naming_schema": ["%Y-%m-%d-%H-%M-%S"],
-                    "also_include_naming_schema": [],
-                }
-
-                if has_pull:
-                    role = "REPLICATION_TASK_WRITE_PULL"
-                else:
-                    role = "REPLICATION_TASK_WRITE"
-                with unprivileged_user_client([role]) as c:
-                    if has_pull:
-                        c.call("replication.update", t["id"], payload)
-                    else:
-                        with pytest.raises(ValidationErrors) as ve:
-                            c.call("replication.update", t["id"], payload)
-
-                        assert ve.value.errors[0].attribute == "replication_update.direction"
-
-
-@pytest.mark.parametrize("has_pull", [False, True])
-def test_restore_push_replication(has_pull):
-    with dataset("src") as src:
-        with dataset("dst") as dst:
-            with replication_task({
-                "name": "Test",
-                "direction": "PUSH",
-                "transport": "LOCAL",
-                "source_datasets": [src],
-                "target_dataset": dst,
-                "recursive": True,
-                "also_include_naming_schema": ["%Y-%m-%d-%H-%M-%S"],
-                "retention_policy": "NONE",
-                "auto": False,
-            }) as t:
-                with dataset("dst2") as dst2:
-                    payload = {
-                        "name": "Test restore",
-                        "target_dataset": dst2,
-                    }
-
-                    if has_pull:
-                        role = "REPLICATION_TASK_WRITE_PULL"
-                    else:
-                        role = "REPLICATION_TASK_WRITE"
-                    with unprivileged_user_client([role]) as c:
-                        if has_pull:
-                            rt = c.call("replication.restore", t["id"], payload)
-                            c.call("replication.delete", rt["id"])
-                        else:
-                            with pytest.raises(ValidationErrors) as ve:
-                                c.call("replication.restore", t["id"], payload)
-
-                            assert ve.value.errors[0].attribute == "replication_create.direction"
diff --git a/tests/api2/test_replication_sudo.py b/tests/api2/test_replication_sudo.py
deleted file mode 100644
index d14a1147b0d99..0000000000000
--- a/tests/api2/test_replication_sudo.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, password, ssh
-
-
-@pytest.mark.parametrize("task", [
-    {"direction": "PUSH", "also_include_naming_schema": ["auto-%Y-%m-%d-%H-%M"]},
-    {"direction": "PULL", "naming_schema": ["auto-%Y-%m-%d-%H-%M"]},
-])
-def test_replication_sudo(task):
-    with dataset("admin") as admin_homedir:
-        with user({
-            "username": "admin",
-            "full_name": "admin",
-            "group_create": True,
-            "home": f"/mnt/{admin_homedir}",
-            "password": "test1234",
-        }):
-            ssh_connection = call("keychaincredential.setup_ssh_connection", {
-                "private_key": {
-                    "generate_key": True,
-                    "name": "test key",
-                },
-                "connection_name": "test",
-                "setup_type": "SEMI-AUTOMATIC",
-                "semi_automatic_setup": {
-                    "url": "http://localhost",
-                    "password": password(),
-                    "username": "admin",
-                    "sudo": True,
-                },
-            })
-            try:
-                with dataset("src") as src:
-                    ssh(f"touch /mnt/{src}/test")
-                    call("zfs.snapshot.create", {"dataset": src, "name": "auto-2023-01-18-16-00"})
-                    with dataset("dst") as dst:
-                        call("replication.run_onetime", {
-                            **task,
-                            "transport": "SSH",
-                            "ssh_credentials": ssh_connection["id"],
-                            "sudo": True,
-                            "source_datasets": [src],
-                            "target_dataset": dst,
-                            "recursive": False,
-                            "retention_policy": "NONE",
-                        }, job=True)
-
-                        assert ssh(f"ls /mnt/{dst}") == "test\n"
-            finally:
-                call("keychaincredential.delete", ssh_connection["id"])
-                call("keychaincredential.delete", ssh_connection["attributes"]["private_key"])
diff --git a/tests/api2/test_replication_utils.py b/tests/api2/test_replication_utils.py
deleted file mode 100644
index 4f3bfd7a1d119..0000000000000
--- a/tests/api2/test_replication_utils.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, pool
-
-
-@pytest.fixture(scope="module")
-def localhost_ssh_connection():
-    credential = call("keychaincredential.create", {
-        "name": "key",
-        "type": "SSH_KEY_PAIR",
-        "attributes": call("keychaincredential.generate_ssh_key_pair"),
-    })
-    try:
-        token = call("auth.generate_token", 600, {}, False)
-        connection = call("keychaincredential.remote_ssh_semiautomatic_setup", {
-            "name": "localhost",
-            "url": "http://localhost",
-            "token": token,
-            "private_key": credential["id"],
-        })
-        try:
-            yield connection["id"]
-        finally:
-            call("keychaincredential.delete", connection["id"])
-    finally:
-        call("keychaincredential.delete", credential["id"])
-
-
-@pytest.mark.parametrize("transport", ["SSH", "SSH+NETCAT"])
-def test_list_datasets_ssh(localhost_ssh_connection, transport):
-    assert pool in call("replication.list_datasets", transport, localhost_ssh_connection)
diff --git a/tests/api2/test_reporting_netdataweb.py b/tests/api2/test_reporting_netdataweb.py
deleted file mode 100644
index b6cf98f7c6c6b..0000000000000
--- a/tests/api2/test_reporting_netdataweb.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pytest
-import requests
-from requests.auth import HTTPBasicAuth
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call, url
-
-
-def test_netdata_web_login_succeed():
-    password = call('reporting.netdataweb_generate_password')
-    r = requests.get(f'{url()}/netdata/', auth=HTTPBasicAuth('root', password))
-    assert r.status_code == 200
-
-
-def test_netdata_web_login_fail():
-    r = requests.get(f'{url()}/netdata/')
-    assert r.status_code == 401
-
-
-@pytest.mark.parametrize("role,expected",  [
-    (["FULL_ADMIN"], True),
-    (["READONLY_ADMIN"], True),
-])
-def test_netdata_web_login_unprivileged_succeed(role, expected):
-    with unprivileged_user_client(roles=role) as c:
-        me = c.call('auth.me')
-        password = c.call('reporting.netdataweb_generate_password')
-        r = requests.get(f'{url()}/netdata/', auth=HTTPBasicAuth(me['pw_name'], password))
-        assert (r.status_code == 200) is expected
diff --git a/tests/api2/test_reporting_realtime.py b/tests/api2/test_reporting_realtime.py
deleted file mode 100644
index 59ebd7fefb090..0000000000000
--- a/tests/api2/test_reporting_realtime.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import time
-
-from middlewared.test.integration.assets.account import unprivileged_user_client
-
-
-def test_reporting_realtime():
-    with unprivileged_user_client(["REPORTING_READ"]) as c:
-        events = []
-
-        def callback(type, **message):
-            events.append((type, message))
-
-        c.subscribe("reporting.realtime", callback, sync=True)
-
-        time.sleep(5)
-
-        assert events
-
-        assert not events[0][1]["fields"]["failed_to_connect"]
diff --git a/tests/api2/test_rest_api_authentication.py b/tests/api2/test_rest_api_authentication.py
deleted file mode 100644
index 9eb55d1470ad9..0000000000000
--- a/tests/api2/test_rest_api_authentication.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# -*- coding=utf-8 -*-
-import contextlib
-import io
-import json
-
-import pytest
-import requests
-
-from middlewared.test.integration.assets.account import unprivileged_user as unprivileged_user_template
-from middlewared.test.integration.assets.api_key import api_key
-from middlewared.test.integration.utils import client
-from middlewared.test.integration.utils.client import truenas_server
-
-import os
-import sys
-sys.path.append(os.getcwd())
-from functions import GET
-
-
-@contextlib.contextmanager
-def api_key_auth(allowlist):
-    with unprivileged_user_template(
-        username="unprivileged2",
-        group_name="unprivileged_users2",
-        privilege_name="Unprivileged users",
-        allowlist=allowlist,
-        web_shell=False,
-    ) as t:
-        with api_key(t.username) as key:
-            yield dict(anonymous=True, headers={"Authorization": f"Bearer {key}"})
-
-
-@contextlib.contextmanager
-def login_password_auth(allowlist):
-    with unprivileged_user_template(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=allowlist,
-        web_shell=False,
-    ) as t:
-        yield dict(auth=(t.username, t.password))
-
-
-@contextlib.contextmanager
-def token_auth(allowlist):
-    with unprivileged_user_template(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=allowlist,
-        web_shell=False,
-    ) as t:
-        with client(auth=(t.username, t.password)) as c:
-            token = c.call("auth.generate_token", 300, {}, True)
-            yield dict(anonymous=True, headers={"Authorization": f"Token {token}"})
-
-
-@pytest.fixture(params=[api_key_auth, login_password_auth, token_auth])
-def auth(request):
-    return request.param
-
-
-def test_root_api_key_rest(auth):
-    """We should be able to call a method with a root credential using REST API."""
-    with auth([{"method": "*", "resource": "*"}]) as kwargs:
-        results = GET('/system/info/', **kwargs)
-        assert results.status_code == 200, results.text
-
-
-def test_allowed_api_key_rest_plain(auth):
-    """We should be able to request an endpoint with a credential that allows that request using REST API."""
-    with auth([{"method": "GET", "resource": "/system/info/"}]) as kwargs:
-        results = GET('/system/info/', **kwargs)
-        assert results.status_code == 200, results.text
-
-
-def test_allowed_api_key_rest_dynamic(auth):
-    """We should be able to request a dynamic endpoint with a credential that allows that request using REST API."""
-    with auth([{"method": "GET", "resource": "/user/id/{id_}/"}]) as kwargs:
-        results = GET('/user/id/1/', **kwargs)
-        assert results.status_code == 200, results.text
-
-
-def test_denied_api_key_rest(auth):
-    """
-    We should not be able to request an endpoint with a credential that does not allow that request using the REST API.
-    """
-    with auth([{"method": "GET", "resource": "/system/info_/"}]) as kwargs:
-        results = GET('/system/info/', **kwargs)
-        assert results.status_code == 403
-
-
-def test_root_api_key_upload(auth):
-    """We should be able to call a method with root a credential using file upload endpoint."""
-    ip = truenas_server.ip
-    with auth([{"method": "*", "resource": "*"}]) as kwargs:
-        kwargs.pop("anonymous", None)  # This key is only used for our test requests library
-        r = requests.post(
-            f"http://{ip}/_upload",
-            **kwargs,
-            data={
-                "data": json.dumps({
-                    "method": "filesystem.put",
-                    "params": ["/tmp/upload"],
-                })
-            },
-            files={
-                "file": io.BytesIO(b"test"),
-            },
-            timeout=10
-        )
-        r.raise_for_status()
-
-
-def test_allowed_api_key_upload(auth):
-    """We should be able to call a method with an API that allows that call using file upload endpoint."""
-    ip = truenas_server.ip
-    with auth([{"method": "CALL", "resource": "filesystem.put"}]) as kwargs:
-        kwargs.pop("anonymous", None)  # This key is only used for our test requests library
-        r = requests.post(
-            f"http://{ip}/_upload",
-            **kwargs,
-            data={
-                "data": json.dumps({
-                    "method": "filesystem.put",
-                    "params": ["/tmp/upload"],
-                })
-            },
-            files={
-                "file": io.BytesIO(b"test"),
-            },
-            timeout=10
-        )
-        r.raise_for_status()
-
-
-def test_denied_api_key_upload(auth):
-    """
-    We should not be able to call a method with a credential that does not allow that call using file upload endpoint.
-    """
-    ip = truenas_server.ip
-    with auth([{"method": "CALL", "resource": "filesystem.put_"}]) as kwargs:
-        kwargs.pop("anonymous", None)  # This key is only used for our test requests library
-        r = requests.post(
-            f"http://{ip}/_upload",
-            **kwargs,
-            data={
-                "data": json.dumps({
-                    "method": "filesystem.put",
-                    "params": ["/tmp/upload"],
-                })
-            },
-            files={
-                "file": io.BytesIO(b"test"),
-            },
-            timeout=10
-        )
-        assert r.status_code == 403
diff --git a/tests/api2/test_rest_api_download.py b/tests/api2/test_rest_api_download.py
deleted file mode 100644
index 5657380d1338d..0000000000000
--- a/tests/api2/test_rest_api_download.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import errno
-import time
-
-import pytest
-import requests
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, client, session, url
-
-
-@pytest.mark.parametrize("method", ["test_download_pipe", "test_download_unchecked_pipe"])
-def test_download(method):
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/{method}",
-            headers={"Content-type": "application/json"},
-            data="{\"key\": \"value\"}",
-        )
-        r.raise_for_status()
-        assert r.headers["Content-Type"] == "application/octet-stream"
-        assert r.text == '{"key": "value"}'
-
-
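-# As the two tests below show, a method with a checked output pipe refuses to run
-# unless its output is actually downloaded, while an unchecked pipe lets the call
-# complete and return plain JSON.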
-def test_no_download_from_checked_pipe():
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/test_download_pipe?download=0",
-            headers={"Content-type": "application/json"},
-            data="{\"key\": \"value\"}",
-        )
-
-        assert r.status_code == 400
-        assert r.json()["message"] == "JSON response is not supported for this method."
-
-
-def test_no_download_from_unchecked_pipe():
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/test_download_unchecked_pipe?download=0",
-            headers={"Content-type": "application/json"},
-            data="{\"key\": \"value\"}",
-        )
-        r.raise_for_status()
-
-        assert r.headers["Content-Type"].startswith("application/json")
-        assert r.json() == {"wrapped": {"key": "value"}}
-
-
-def test_download_from_download_endpoint():
-    with client() as c:
-        job_id, path = c.call("core.download", "resttest.test_download_pipe", [{"key": "value"}], "file.bin")
-
-    r = requests.get(f"{url()}{path}")
-    r.raise_for_status()
-
-    assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\""
-    assert r.headers["Content-Type"] == "application/octet-stream"
-    assert r.text == '{"key": "value"}'
-
-
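-# As exercised below: a buffered download returns whatever the slow pipe has produced
-# at request time (empty immediately, the full payload after waiting), while an
-# unbuffered download blocks until the data is available.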
-@pytest.mark.parametrize("buffered,sleep,result", [
-    (True, 0, ""),
-    (True, 4, '{"key": "value"}'),
-    (False, 0, '{"key": "value"}'),
-])
-def test_buffered_download_from_slow_download_endpoint(buffered, sleep, result):
-    with client() as c:
-        job_id, path = c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin",
-                              buffered)
-
-    time.sleep(sleep)
-
-    r = requests.get(f"{url()}{path}")
-    r.raise_for_status()
-
-    assert r.headers["Content-Disposition"] == "attachment; filename=\"file.bin\""
-    assert r.headers["Content-Type"] == "application/octet-stream"
-    assert r.text == result
-
-
-def test_download_duplicate_job():
-    call("core.download", "resttest.test_download_slow_pipe_with_lock", [{"key": "value"}], "file.bin")
-    with pytest.raises(CallError) as ve:
-        call("core.download", "resttest.test_download_slow_pipe_with_lock", [{"key": "value"}], "file.bin")
-
-    assert ve.value.errno == errno.EBUSY
-
-
-def test_download_authorization_ok():
-    with unprivileged_user(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=[{"method": "CALL", "resource": "resttest.test_download_slow_pipe"}],
-        web_shell=False,
-    ) as user:
-        with client(auth=(user.username, user.password)) as c:
-            c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin")
-
-
-def test_download_authorization_fails():
-    with unprivileged_user(
-        username="unprivileged",
-        group_name="unprivileged_users",
-        privilege_name="Unprivileged users",
-        allowlist=[],
-        web_shell=False,
-    ) as user:
-        with client(auth=(user.username, user.password)) as c:
-            with pytest.raises(CallError) as ve:
-                c.call("core.download", "resttest.test_download_slow_pipe", [{"key": "value"}], "file.bin")
-
-            assert ve.value.errno == errno.EACCES
diff --git a/tests/api2/test_rest_api_upload.py b/tests/api2/test_rest_api_upload.py
deleted file mode 100644
index 1c37a0016fef3..0000000000000
--- a/tests/api2/test_rest_api_upload.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import io
-import json
-
-import pytest
-
-from middlewared.test.integration.utils import client, session, url
-
-
-@pytest.mark.parametrize("method", ["test_input_pipe", "test_input_unchecked_pipe"])
-def test_upload(method):
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/{method}",
-            files={
-                "data": (None, io.StringIO('{"key": "value"}')),
-                "file": (None, io.StringIO("FILE")),
-            },
-        )
-        r.raise_for_status()
-        job_id = r.json()
-
-    with client() as c:
-        assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE'
-
-
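-# As the tests below show, a checked input pipe only accepts multipart uploads, while
-# an unchecked pipe also accepts a plain JSON body and the job result then shows
-# "NONE" in place of the file contents.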
-def test_no_upload_to_checked_pipe():
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/test_input_pipe",
-            headers={"Content-type": "application/json"},
-            data="{\"key\": \"value\"}",
-        )
-
-        assert r.status_code == 400
-        assert r.json()["message"] == "This method accepts only multipart requests."
-
-
-def test_no_upload_to_unchecked_pipe():
-    with session() as s:
-        r = s.post(
-            f"{url()}/api/v2.0/resttest/test_input_unchecked_pipe",
-            headers={"Content-type": "application/json"},
-            data='{"key": "value"}',
-        )
-        r.raise_for_status()
-        job_id = r.json()
-
-    with client() as c:
-        assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}NONE'
-
-
-def test_upload_to_upload_endpoint():
-    with session() as s:
-        r = s.post(
-            f"{url()}/_upload",
-            files={
-                "data": (None, io.StringIO(json.dumps({
-                    "method": "resttest.test_input_pipe",
-                    "params": [{"key": "value"}]
-                }))),
-                "file": (None, io.StringIO("FILE")),
-            },
-        )
-        r.raise_for_status()
-        job_id = r.json()["job_id"]
-
-    with client() as c:
-        assert c.call("core.job_wait", job_id, job=True) == '{"key": "value"}FILE'
diff --git a/tests/api2/test_root_session_alert.py b/tests/api2/test_root_session_alert.py
deleted file mode 100644
index dbc3c7135e9db..0000000000000
--- a/tests/api2/test_root_session_alert.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.product import product_type
-from middlewared.test.integration.utils.client import client, truenas_server
-from middlewared.test.integration.utils import call
-from time import sleep
-
-
-@pytest.fixture(scope="function")
-def set_product_type(request):
-    # force SCALE_ENTERPRISE product type
-    with product_type():
-        yield
-
-
-def get_session_alert(call_fn, session_id):
-    # sleep a little while to let auth event get logged
-    sleep(5)
-
-    alert = call_fn('alert.run_source', 'AdminSession')
-    assert alert
-
-    assert session_id in alert[0]['args']['sessions'], str(alert[0]['args'])
-
-
-def check_session_alert(call_fn):
-    session_id = call_fn('auth.sessions', [['current', '=', True]], {'get': True})['id']
-    get_session_alert(call_fn, session_id)
-
-
-def test_root_session(set_product_type):
-    # first check with our regular persistent session
-    check_session_alert(call)
-
-    with client(host_ip=truenas_server.ip) as c:
-        # check that we also pick up second alert
-        check_session_alert(c.call)
diff --git a/tests/api2/test_rsync_ssh_authentication.py b/tests/api2/test_rsync_ssh_authentication.py
deleted file mode 100644
index acd9903b7af40..0000000000000
--- a/tests/api2/test_rsync_ssh_authentication.py
+++ /dev/null
@@ -1,321 +0,0 @@
-import base64
-import contextlib
-import errno
-from unittest.mock import ANY
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors, ValidationError
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.keychain import localhost_ssh_credentials
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.unittest import RegexString
-
-
-@contextlib.contextmanager
-def task(data):
-    data = {
-        **data
-    }
-
-    task = call("rsynctask.create", data)
-
-    try:
-        yield task
-    finally:
-        call("rsynctask.delete", task["id"])
-
-
-def run_task(task, timeout=120):
-    call("rsynctask.run", task["id"], job=True, timeout=timeout)
-
-
-@pytest.fixture(scope="module")
-def localuser():
-    with dataset("localuser_homedir") as localuser_homedir:
-        with user({
-            "username": "localuser",
-            "full_name": "Local User",
-            "group_create": True,
-            "home": f"/mnt/{localuser_homedir}",
-            "password": "test1234",
-        }) as u:
-            yield u
-
-
-@pytest.fixture(scope="module")
-def remoteuser():
-    with dataset("remoteuser_homedir") as remoteuser_homedir:
-        with user({
-            "username": "remoteuser",
-            "full_name": "Remote User",
-            "group_create": True,
-            "home": f"/mnt/{remoteuser_homedir}",
-            "password": "test1234",
-        }) as u:
-            yield u
-
-
-@pytest.fixture(scope="module")
-def src(localuser):
-    with dataset("src") as src:
-        path = f"/mnt/{src}"
-        yield path
-
-
-@pytest.fixture(scope="module")
-def dst(remoteuser):
-    with dataset("dst") as dst:
-        path = f"/mnt/{dst}"
-        ssh(f"chown -R remoteuser:remoteuser {path}")
-        yield path
-
-
-@pytest.fixture(scope="module")
-def ssh_credentials(remoteuser):
-    with localhost_ssh_credentials(username="remoteuser") as c:
-        yield c
-
-
-@pytest.fixture(scope="module")
-def ipv6_ssh_credentials(remoteuser):
-    with localhost_ssh_credentials(url="http://[::1]", username="remoteuser") as c:
-        yield c
-
-
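-# Reset state between tests: remove any SSH keys installed by a previous test,
-# recreate the source test file with the correct ownership, and empty the
-# destination dataset.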
-@pytest.fixture(scope="function")
-def cleanup(localuser, src, dst):
-    ssh(f"rm -rf {localuser['home']}/.ssh")
-    ssh(f"rm -rf {src}/*", check=False)
-    ssh(f"touch {src}/test")
-    ssh(f"chown -R localuser:localuser {src}")
-    ssh(f"rm -rf {dst}/*", check=False)
-
-
-def test_no_credential_provided_create(cleanup, localuser, remoteuser, src, dst):
-    with pytest.raises(ValidationErrors) as e:
-        with task({
-            "path": f"{src}/",
-            "user": "localuser",
-            "remotehost": "remoteuser@localhost",
-            "remoteport": 22,
-            "mode": "SSH",
-            "remotepath": dst,
-        }):
-            pass
-
-    assert e.value.errors == [
-        ValidationError(
-            "rsync_task_create.user",
-            RegexString(".*you need a user with a private key.*"),
-            errno.EINVAL,
-        )
-    ]
-
-
-def test_home_directory_key_invalid_permissions(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    ssh(f"mkdir {localuser['home']}/.ssh")
-    call(
-        "filesystem.file_receive",
-        f"{localuser['home']}/.ssh/id_rsa",
-        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
-        {"mode": 0o0644},
-    )
-    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
-
-    with pytest.raises(ValidationErrors) as e:
-        with task({
-            "path": f"{src}/",
-            "user": "localuser",
-            "remotehost": "remoteuser@localhost",
-            "remoteport": 22,
-            "mode": "SSH",
-            "remotepath": dst,
-        }):
-            pass
-
-    assert e.value.errors == [
-        ValidationError(
-            "rsync_task_create.user",
-            RegexString("Permissions 644 .* are too open.*"),
-            errno.EINVAL,
-        )
-    ]
-
-
-@pytest.mark.parametrize("validate_rpath", [True, False])
-def test_home_directory_key_not_in_known_hosts(cleanup, localuser, remoteuser, src, dst, ssh_credentials,
-                                               validate_rpath):
-    ssh(f"mkdir {localuser['home']}/.ssh")
-    call(
-        "filesystem.file_receive",
-        f"{localuser['home']}/.ssh/id_rsa",
-        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
-        {"mode": 0o600},
-    )
-    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
-
-    with pytest.raises(ValidationErrors) as e:
-        with task({
-            "path": f"{src}/",
-            "user": "localuser",
-            "remotehost": "remoteuser@localhost",
-            "remoteport": 22,
-            "mode": "SSH",
-            "remotepath": dst,
-            "validate_rpath": validate_rpath,
-        }):
-            pass
-
-    assert e.value.errors == [
-        ValidationError(
-            "rsync_task_create.remotehost",
-            ANY,
-            ValidationError.ESSLCERTVERIFICATIONERROR,
-        )
-    ]
-
-
-def test_ssh_keyscan_does_not_duplicate_host_keys(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    ssh(f"mkdir {localuser['home']}/.ssh")
-    ssh(f"ssh-keyscan localhost >> {localuser['home']}/.ssh/known_hosts")
-    call(
-        "filesystem.file_receive",
-        f"{localuser['home']}/.ssh/id_rsa",
-        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
-        {"mode": 0o600},
-    )
-    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
-
-    known_hosts = ssh(f"cat {localuser['home']}/.ssh/known_hosts")
-
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "remotehost": "remoteuser@localhost",
-        "remoteport": 22,
-        "mode": "SSH",
-        "remotepath": dst,
-        "ssh_keyscan": True,
-    }) as t:
-        pass
-
-    assert ssh(f"cat {localuser['home']}/.ssh/known_hosts") == known_hosts
-
-
-def test_home_directory_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    ssh(f"mkdir {localuser['home']}/.ssh")
-    call(
-        "filesystem.file_receive",
-        f"{localuser['home']}/.ssh/id_rsa",
-        base64.b64encode(ssh_credentials["keypair"]["attributes"]["private_key"].encode("ascii")).decode("ascii"),
-        {"mode": 0o600},
-    )
-    ssh(f"chown -R localuser:localuser {localuser['home']}/.ssh")
-
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "remotehost": "remoteuser@localhost",
-        "remoteport": 22,
-        "mode": "SSH",
-        "remotepath": dst,
-        "ssh_keyscan": True,
-    }) as t:
-        run_task(t)
-
-    assert ssh(f"ls -1 {dst}") == "test\n"
-
-
-def test_ssh_credentials_key(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "ssh_credentials": ssh_credentials["credentials"]["id"],
-        "mode": "SSH",
-        "remotepath": dst,
-    }) as t:
-        run_task(t)
-
-    assert ssh(f"ls -1 {dst}") == "test\n"
-
-
-def test_ssh_credentials_delete(cleanup, localuser, remoteuser, src, dst):
-    with localhost_ssh_credentials(username="remoteuser") as c:
-        path = f"{src}/"
-        with task({
-            "path": path,
-            "user": "localuser",
-            "ssh_credentials": c["credentials"]["id"],
-            "mode": "SSH",
-            "remotepath": dst,
-        }) as t:
-            assert call("keychaincredential.used_by", c["credentials"]["id"]) == [
-                {"title": f"Rsync task for {path!r}", "unbind_method": "disable"},
-            ]
-
-            call("keychaincredential.delete", c["credentials"]["id"], {"cascade": True})
-
-            t = call("rsynctask.get_instance", t["id"])
-            assert not t["enabled"]
-
-
-def test_state_persist(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "ssh_credentials": ssh_credentials["credentials"]["id"],
-        "mode": "SSH",
-        "remotepath": dst,
-    }) as t:
-        run_task(t)
-
-        row = call("datastore.query", "tasks.rsync", [["id", "=", t["id"]]], {"get": True})
-        assert row["rsync_job"]["state"] == "SUCCESS"
-
-
-def test_local_path_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    src = f"{src}/work stuff"
-    ssh(f"mkdir '{src}'")
-    ssh(f"touch '{src}/test2'")
-    ssh(f"chown -R localuser:localuser '{src}'")
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "ssh_credentials": ssh_credentials["credentials"]["id"],
-        "mode": "SSH",
-        "remotepath": dst,
-    }) as t:
-        run_task(t)
-
-    assert ssh(f"ls -1 '{dst}'") == "test2\n"
-
-
-def test_remotepath_with_whitespace(cleanup, localuser, remoteuser, src, dst, ssh_credentials):
-    dst = f"{dst}/work stuff"
-    ssh(f"mkdir '{dst}'")
-    ssh(f"chown remoteuser:remoteuser '{dst}'")
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "ssh_credentials": ssh_credentials["credentials"]["id"],
-        "mode": "SSH",
-        "remotepath": dst,
-    }) as t:
-        run_task(t)
-
-    assert ssh(f"ls -1 '{dst}'") == "test\n"
-
-
-def test_ipv6_ssh_credentials(cleanup, localuser, remoteuser, src, dst, ipv6_ssh_credentials):
-    with task({
-        "path": f"{src}/",
-        "user": "localuser",
-        "ssh_credentials": ipv6_ssh_credentials["credentials"]["id"],
-        "mode": "SSH",
-        "remotepath": dst,
-    }) as t:
-        run_task(t)
-
-    assert ssh(f"ls -1 {dst}") == "test\n"
diff --git a/tests/api2/test_run_as_user_impl.py b/tests/api2/test_run_as_user_impl.py
deleted file mode 100644
index 71f966af82e13..0000000000000
--- a/tests/api2/test_run_as_user_impl.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import sys
-import os
-from contextlib import contextmanager
-
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-from functions import wait_on_job
-from middlewared.test.integration.utils import call, ssh
-
-
-@contextmanager
-def create_cron_job(owner, ownerGroup, user):
-    test_folder = ssh('mktemp -d').strip()
-    ssh(f'chown -R {owner}:{ownerGroup} {test_folder}')
-    cron = call(
-        'cronjob.create', {
-            'command': f'touch {test_folder}/test.txt', 'user': user, 'stderr': False, 'stdout': False}
-    )
-    try:
-        yield cron
-    finally:
-        ssh(f'rm -rf {test_folder}')
-
-
-@contextmanager
-def run_cron_job(cron_id):
-    job_id = call('cronjob.run', cron_id)
-    try:
-        yield wait_on_job(job_id, 300)
-    finally:
-        call('cronjob.delete', cron_id)
-
-
-def test_01_running_as_valid_user():
-    with create_cron_job(owner='apps', ownerGroup='apps', user='apps') as cron_job:
-        with run_cron_job(cron_job['id']) as job_detail:
-            assert job_detail['results']['error'] is None
-
-
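-# The cron job runs as 'apps' but the target directory is owned by root, so the
-# touch command fails and the job reports a non-zero exit status.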
-def test_02_running_as_invalid_user():
-    with create_cron_job(owner='root', ownerGroup='root', user='apps') as cron_job:
-        with run_cron_job(cron_job['id']) as job_detail:
-            assert f'"{cron_job["command"]}" exited with 1' in job_detail['results']['error'], job_detail
diff --git a/tests/api2/test_schema_private.py b/tests/api2/test_schema_private.py
deleted file mode 100644
index 0d65767b63637..0000000000000
--- a/tests/api2/test_schema_private.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, client, mock, ssh
-
-
-def test_private_params_do_not_leak_to_logs():
-    with mock("test.test1", """    
-        from middlewared.service import accepts
-        from middlewared.schema import Dict, Str
-
-        @accepts(Dict("test", Str("password", private=True)))
-        async def mock(self, args):
-            raise Exception()
-    """):
-        log_before = ssh("cat /var/log/middlewared.log")
-
-        with client(py_exceptions=False) as c:
-            with pytest.raises(Exception):
-                c.call("test.test1", {"password": "secret"})
-
-        log = ssh("cat /var/log/middlewared.log")[len(log_before):]
-        assert "Exception while calling test.test1(*[{'password': '********'}])" in log
-
-
-def test_private_params_do_not_leak_to_core_get_jobs():
-    with mock("test.test1", """    
-        from middlewared.service import accepts, job
-        from middlewared.schema import Dict, Str
-
-        @accepts(Dict("test", Str("password", private=True)))
-        @job()
-        async def mock(self, job, args):
-            return 42
-    """):
-        job_id = call("test.test1", {"password": "secret"})
-
-        job_descr = call("core.get_jobs", [["id", "=", job_id]], {"get": True})
-        assert job_descr["arguments"] == [{"password": "********"}]
diff --git a/tests/api2/test_serial_consoles.py b/tests/api2/test_serial_consoles.py
deleted file mode 100644
index 122d29f1c9183..0000000000000
--- a/tests/api2/test_serial_consoles.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call, ssh
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-def test_enabling_serial_port():
-    ports = call('system.advanced.serial_port_choices')
-    assert 'ttyS0' in ports, ports
-
-    for port in ports:
-        test_config = {'serialconsole': True, 'serialport': port}
-        config = call('system.advanced.update', test_config)
-        for k, v in test_config.items():
-            assert config[k] == v, config
-        assert_serial_port_configuration({p: p == port for p in ports})
-
-
-def test_disabling_serial_port():
-    ports = call('system.advanced.serial_port_choices')
-    assert 'ttyS0' in ports, ports
-
-    for port in ports:
-        test_config = {'serialconsole': False, 'serialport': port}
-        config = call('system.advanced.update', test_config)
-        for k, v in test_config.items():
-            assert config[k] == v, config
-        assert_serial_port_configuration({p: False for p in ports})
-
-
-def assert_serial_port_configuration(ports):
-    for port, enabled in ports.items():
-        is_enabled = ssh(f'systemctl is-enabled serial-getty@{port}.service', False).strip() == 'enabled'
-        assert is_enabled is enabled, f'{port!r} enabled assertion failed: {is_enabled!r} != {enabled!r}'
-        is_enabled = ssh(f'systemctl is-active --quiet serial-getty@{port}.service', False, True)['returncode'] == 0
-        assert is_enabled is enabled, f'{port!r} active assertion failed: {is_enabled!r} != {enabled!r}'
diff --git a/tests/api2/test_sharing_service_encrypted_dataset_info.py b/tests/api2/test_sharing_service_encrypted_dataset_info.py
deleted file mode 100644
index 7a8108372edce..0000000000000
--- a/tests/api2/test_sharing_service_encrypted_dataset_info.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import contextlib
-import pytest
-
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.pool import dataset
-
-
-PASSPHRASE = 'testing123'
-ENCRYPTION_PARAMETERS = {
-    'encryption': True,
-    'encryption_options': {
-        'algorithm': 'AES-256-GCM',
-        'pbkdf2iters': 350000,
-        'passphrase': PASSPHRASE,
-    },
-    'inherit_encryption': False,
-}
-
-
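-# Lock the dataset for the duration of the block and unlock it with the passphrase on exit.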
-@contextlib.contextmanager
-def lock_dataset(dataset_name):
-    try:
-        yield call('pool.dataset.lock', dataset_name, {'force_umount': True}, job=True)
-    finally:
-        call(
-            'pool.dataset.unlock', dataset_name, {
-                'datasets': [{'passphrase': PASSPHRASE, 'name': dataset_name}]
-            },
-            job=True,
-        )
-
-
-@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [
-    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'),
-    ('sharing.nfs', {}, {},  'path'),
-    ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'),
-])
-def test_service_encrypted_dataset_default_info(namespace, dataset_creation_params, share_creation_params, path_field):
-    with dataset('test_sharing_locked_ds_info', data={
-        **ENCRYPTION_PARAMETERS,
-        **dataset_creation_params,
-    }) as ds:
-        path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}'
-        share_creation_params[path_field] = path
-        share = call(f'{namespace}.create', share_creation_params)
-        assert share['locked'] is False
-
-        with lock_dataset(ds):
-            assert call(f'{namespace}.get_instance', share['id'])['locked'] is True
-
-        assert call(f'{namespace}.get_instance', share['id'])['locked'] is False
-
-
-@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field,selected_fields', [
-    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path', [['path', 'name'], ['path', 'name', 'locked']]),
-    ('sharing.nfs', {}, {},  'path', [['path', 'hosts'], ['path', 'hosts', 'locked']]),
-    (
-        'iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'},
-        {'name': 'test-extend'}, 'disk',
-        [['name', 'type'], ['name', 'type', 'locked']]
-    ),
-])
-def test_service_encrypted_dataset_selected_info(
-    namespace, dataset_creation_params, share_creation_params, path_field, selected_fields,
-):
-    with dataset('test_sharing_locked_ds_info', data={
-        **ENCRYPTION_PARAMETERS,
-        **dataset_creation_params,
-    }) as ds:
-        path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}'
-        share_creation_params[path_field] = path
-        assert call(f'{namespace}.create', share_creation_params)['locked'] is False
-
-        with lock_dataset(ds):
-            for selected_field_entry in selected_fields:
-                for share in call(f'{namespace}.query', [], {'select': selected_field_entry}):
-                    assert set(share) == set(selected_field_entry)
-
-
-@pytest.mark.parametrize('namespace,dataset_creation_params,share_creation_params,path_field', [
-    ('sharing.smb', {}, {'name': 'test_smb_share'}, 'path'),
-    ('sharing.nfs', {}, {}, 'path'),
-    ('iscsi.extent', {'type': 'VOLUME', 'volsize': 268451840, 'volblocksize': '16K'}, {'name': 'test-extend'}, 'disk'),
-])
-def test_service_encrypted_dataset_retrieve_info_with_cache(
-    namespace, dataset_creation_params, share_creation_params, path_field
-):
-    with dataset('test_sharing_locked_ds_info', data={
-        **ENCRYPTION_PARAMETERS,
-        **dataset_creation_params,
-    }) as ds:
-        path = f'zvol/{ds}' if dataset_creation_params.get('type') == 'VOLUME' else f'/mnt/{ds}'
-        share = call(f'{namespace}.create', {**share_creation_params, path_field: path})
-        assert share['locked'] is False
-        with lock_dataset(ds):
-            assert call(
-                f'{namespace}.get_instance', share['id'], {'extra': {'retrieve_locked_info': False}}
-            ).get('locked') is None
-            cached_locked_value = call(
-                f'{namespace}.get_instance', share['id'], {'extra': {'use_cached_locked_datasets': True}}
-            )
-            locked_value = call(
-                f'{namespace}.get_instance', share['id'], {'extra': {'use_cached_locked_datasets': False}}
-            )
-            assert cached_locked_value == locked_value
diff --git a/tests/api2/test_simple_share.py b/tests/api2/test_simple_share.py
deleted file mode 100644
index 20f78fd3810a0..0000000000000
--- a/tests/api2/test_simple_share.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -*- coding=utf-8 -*-
-import pytest
-import secrets
-import string
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call
-
-
-PASSWD = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))
-
-
-def test__smb_simple_share_validation():
-    existing_smb_users = [x['username'] for x in call('user.query', [['smb', '=', True]])]
-    assert len(existing_smb_users) == 0, str(existing_smb_users)
-
-    with pytest.raises(ValidationErrors):
-        call('sharing.smb.share_precheck')
-
-    with user({
-        "username": "simple_share_user",
-        "full_name": "simple_share_user",
-        "group_create": True,
-        "password": PASSWD,
-        "smb": True,
-    }):
-        # First check that a basic call to this endpoint succeeds
-        call('sharing.smb.share_precheck')
-
-        # Verify it works with a basic share name
-        call('sharing.smb.share_precheck', {'name': 'test_share'})
-
-        # Verify an error is raised if the share name is invalid
-        with pytest.raises(ValidationErrors):
-            call('sharing.smb.share_precheck', {'name': 'test_share*'})
-
-        # Another variant of invalid name
-        with pytest.raises(ValidationErrors):
-            call('sharing.smb.share_precheck', {'name': 'gLobaL'})
-
-        with dataset('test_smb') as ds:
-            with smb_share(f'/mnt/{ds}', 'test_share'):
-                with pytest.raises(ValidationErrors):
-                    call('sharing.smb.share_precheck', {'name': 'test_share'})
diff --git a/tests/api2/test_smart_test_crud.py b/tests/api2/test_smart_test_crud.py
deleted file mode 100644
index e17b83da4bd5d..0000000000000
--- a/tests/api2/test_smart_test_crud.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import contextlib
-import re
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call
-
-
-@contextlib.contextmanager
-def smart_test(data):
-    test = call("smart.test.create", data)
-    try:
-        yield test
-    finally:
-        call("smart.test.delete", test["id"])
-
-
-def smart_test_disks(all_disks=False, disk_index=0):
-    if all_disks:
-        return {"all_disks": True}
-    else:
-        return {"disks": [sorted(call("smart.test.disk_choices").keys())[disk_index]]}
-
-
-@pytest.mark.parametrize("existing_all_disks", [False, True])
-@pytest.mark.parametrize("new_all_disks", [False, True])
-def test_smart_test_already_has_tests_for_this_type(existing_all_disks, new_all_disks):
-    if existing_all_disks:
-        error = "There already is an all-disks SHORT test"
-    else:
-        error = "The following disks already have SHORT test: sd[a-z]"
-
-    with smart_test({
-        "schedule": {
-            "hour": "0",
-            "dom": "*",
-            "month": "*",
-            "dow": "*",
-        },
-        **smart_test_disks(existing_all_disks),
-        "type": "SHORT",
-    }):
-        with pytest.raises(ValidationErrors) as ve:
-            with smart_test({
-                "schedule": {
-                    "hour": "1",
-                    "dom": "*",
-                    "month": "*",
-                    "dow": "*",
-                },
-                **smart_test_disks(new_all_disks),
-                "type": "SHORT",
-            }):
-                pass
-
-        assert re.fullmatch(error, ve.value.errors[0].errmsg)
-
-
-@pytest.mark.parametrize("existing_all_disks", [False, True])
-@pytest.mark.parametrize("new_all_disks", [False, True])
-def test_smart_test_intersect(existing_all_disks, new_all_disks):
-    with smart_test({
-        "schedule": {
-            "hour": "3",
-            "dom": "1",
-            "month": "*",
-            "dow": "*",
-        },
-        **smart_test_disks(existing_all_disks),
-        "type": "LONG",
-    }):
-        with pytest.raises(ValidationErrors) as ve:
-            with smart_test({
-                "schedule": {
-                    "hour": "3",
-                    "dom": "*",
-                    "month": "*",
-                    "dow": "1",
-                },
-                **smart_test_disks(existing_all_disks),
-                "type": "SHORT",
-            }):
-                pass
-
-        assert ve.value.errors[0].errmsg == "A LONG test already runs at Day 1st of every month, Mon, 03:00"
-
-
-def test_smart_test_update():
-    with smart_test({
-        "schedule": {
-            "hour": "0",
-            "dom": "*",
-            "month": "*",
-            "dow": "*",
-        },
-        **smart_test_disks(True),
-        "type": "SHORT",
-    }) as test:
-        call("smart.test.update", test["id"], {})
diff --git a/tests/api2/test_smart_test_run.py b/tests/api2/test_smart_test_run.py
deleted file mode 100644
index e753830fb6629..0000000000000
--- a/tests/api2/test_smart_test_run.py
+++ /dev/null
@@ -1,64 +0,0 @@
-import contextlib
-import re
-import time
-
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call, client, mock
-
-
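-# The mocked smart.test.results reports progress in 30% increments (0, 30, 60, 90)
-# before signalling completion, giving the progress assertions below a deterministic
-# sequence to check against.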
-@pytest.fixture(scope="function")
-def short_test():
-    disk = call("disk.query")[0]
-    with mock("smart.test.disk_choices", return_value={disk["identifier"]: disk}):
-        with mock("disk.smartctl", return_value="Self Test has begun"):
-            with mock("smart.test.results", """\
-                    i = 0
-                    async def mock(self, *args):
-                        global i
-                        if i > 100:
-                            return {"current_test": None}
-                        else:
-                            result = {"current_test": {"progress": i}}
-                            i += 30
-                            return result 
-            """):
-                result = call("smart.test.manual_test", [{"identifier": disk["identifier"], "type": "SHORT"}])
-                yield result[0]
-
-
-def test_smart_test_job_progress(short_test):
-    progresses = set()
-    for i in range(30):
-        job = call("core.get_jobs", [["id", "=", short_test["job"]]], {"get": True})
-        if job["state"] == "RUNNING":
-            progresses.add(job["progress"]["percent"])
-            time.sleep(5)
-        elif job["state"] == "SUCCESS":
-            break
-        else:
-            assert False, job
-    else:
-        assert False
-
-    assert progresses == {0, 30, 60, 90}
-
-
-def test_smart_test_event_source(short_test):
-    progresses = set()
-
-    def callback(event_type, **kwargs):
-        progresses.add(kwargs['fields']['progress'])
-
-    with client() as c:
-        c.subscribe(f"smart.test.progress:{short_test['disk']}", callback, sync=True)
-
-        for i in range(30):
-            if None in progresses:
-                assert progresses - {0} == {30, 60, 90, None}
-                break
-            else:
-                time.sleep(5)
-        else:
-            assert False
diff --git a/tests/api2/test_smb_client.py b/tests/api2/test_smb_client.py
deleted file mode 100644
index c9b98eeef39ef..0000000000000
--- a/tests/api2/test_smb_client.py
+++ /dev/null
@@ -1,243 +0,0 @@
-import os
-import pytest
-
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import (
-    del_stream, get_stream, list_stream, set_stream, set_xattr_compat,
-    smb_share, smb_mount
-)
-from middlewared.test.integration.utils import call, client, ssh
-
-
-PERMSET = {
-    "READ_DATA": False,
-    "WRITE_DATA": False,
-    "APPEND_DATA": False,
-    "READ_NAMED_ATTRS": False,
-    "WRITE_NAMED_ATTRS": False,
-    "EXECUTE": False,
-    "DELETE_CHILD": False,
-    "READ_ATTRIBUTES": False,
-    "WRITE_ATTRIBUTES": False,
-    "DELETE": False,
-    "READ_ACL": False,
-    "WRITE_ACL": False,
-    "WRITE_OWNER": False,
-    "SYNCHRONIZE": True
-}
-
-SAMPLE_ENTRY = {
-    "tag": "GROUP",
-    "id": 666,
-    "type": "ALLOW",
-    "perms": PERMSET,
-    "flags": {"BASIC": "INHERIT"}
-}
-
-PERSISTENT_ACL = [
-    {
-        "tag": "GROUP",
-        "id": 545,
-        "type": "ALLOW",
-        "perms": {"BASIC": "FULL_CONTROL"},
-        "flags": {"BASIC": "INHERIT"}
-    }
-]
-
-TMP_SMB_USER_PASSWORD = 'Abcd1234$'
-
-
-@pytest.fixture(scope='module')
-def setup_smb_tests(request):
-    with dataset('smbclient-testing', data={'share_type': 'SMB'}) as ds:
-        with user({
-            'username': 'smbuser',
-            'full_name': 'smbuser',
-            'group_create': True,
-            'password': TMP_SMB_USER_PASSWORD
-        }) as u:
-            with smb_share(os.path.join('/mnt', ds), 'client_share') as s:
-                try:
-                    call('service.start', 'cifs')
-                    yield {'dataset': ds, 'share': s, 'user': u}
-                finally:
-                    call('service.stop', 'cifs')
-
-
-@pytest.fixture(scope='module')
-def mount_share(setup_smb_tests):
-    with smb_mount(setup_smb_tests['share']['name'], 'smbuser', TMP_SMB_USER_PASSWORD) as mp:
-        yield setup_smb_tests | {'mountpoint': mp}
-
-
-def compare_acls(local_path, share_path):
-    local_acl = call('filesystem.getacl', local_path)
-    local_acl.pop('path')
-    smb_acl = call('filesystem.getacl', share_path)
-    smb_acl.pop('path')
-    assert local_acl == smb_acl
-
-
-def test_smb_mount(request, mount_share):
-    assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs'
-
-
-def test_acl_share_root(request, mount_share):
-    compare_acls(mount_share['share']['path'], mount_share['mountpoint'])
-
-
-def test_acl_share_subdir(request, mount_share):
-    call('filesystem.mkdir', {
-        'path': os.path.join(mount_share['share']['path'], 'testdir'),
-        'options': {'raise_chmod_error': False},
-    })
-
-    compare_acls(
-        os.path.join(mount_share['share']['path'], 'testdir'),
-        os.path.join(mount_share['mountpoint'], 'testdir')
-    )
-
-
-def test_acl_share_file(request, mount_share):
-    ssh(f'touch {os.path.join(mount_share["share"]["path"], "testfile")}')
-
-    compare_acls(
-        os.path.join(mount_share['share']['path'], 'testfile'),
-        os.path.join(mount_share['mountpoint'], 'testfile')
-    )
-
-
-@pytest.mark.parametrize('perm', PERMSET.keys())
-def test_acl_share_permissions(request, mount_share, perm):
-    assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs'
-
-    # Grant a single advanced permission (on top of SYNCHRONIZE) for this parametrized run
-    SAMPLE_ENTRY['perms'] = PERMSET | {perm: True}
-    payload = {
-        'path': mount_share['share']['path'],
-        'dacl': [SAMPLE_ENTRY] + PERSISTENT_ACL
-    }
-    call('filesystem.setacl', payload, job=True)
-    compare_acls(mount_share['share']['path'], mount_share['mountpoint'])
-
-
-@pytest.mark.parametrize('flagset', [
-    {
-        'FILE_INHERIT': True,
-        'DIRECTORY_INHERIT': True,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': False,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': True,
-        'DIRECTORY_INHERIT': False,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': False,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': False,
-        'DIRECTORY_INHERIT': True,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': False,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': False,
-        'DIRECTORY_INHERIT': False,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': False,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': True,
-        'DIRECTORY_INHERIT': False,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': True,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': False,
-        'DIRECTORY_INHERIT': True,
-        'NO_PROPAGATE_INHERIT': False,
-        'INHERIT_ONLY': True,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': True,
-        'DIRECTORY_INHERIT': False,
-        'NO_PROPAGATE_INHERIT': True,
-        'INHERIT_ONLY': True,
-        'INHERITED': False,
-    },
-    {
-        'FILE_INHERIT': False,
-        'DIRECTORY_INHERIT': True,
-        'NO_PROPAGATE_INHERIT': True,
-        'INHERIT_ONLY': True,
-        'INHERITED': False,
-    }
-])
-def test_acl_share_flags(request, mount_share, flagset):
-    assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs'
-
-    SAMPLE_ENTRY['flags'] = flagset
-    payload = {
-        'path': mount_share['share']['path'],
-        'dacl': [SAMPLE_ENTRY] + PERSISTENT_ACL
-    }
-    call('filesystem.setacl', payload, job=True)
-    compare_acls(mount_share['share']['path'], mount_share['mountpoint'])
-
-
-def do_stream_ops(fname, samba_compat):
-    set_xattr_compat(samba_compat)
-
-    assert list_stream(fname) == []
-
-    data_to_write = b'canary'
-    if samba_compat:
-        data_to_write += b'\x00'
-
-    # test basic get / set
-    set_stream(fname, 'teststream', data_to_write)
-
-    assert list_stream(fname) == ['teststream']
-
-    xat_data = get_stream(fname, 'teststream')
-    assert xat_data == data_to_write
-
-    data_to_write = b'can'
-    if samba_compat:
-        data_to_write += b'\x00'
-
-    # test that stream is appropriately truncated
-    set_stream(fname, 'teststream', data_to_write)
-
-    xat_data = get_stream(fname, 'teststream')
-    assert xat_data == data_to_write
-
-    # test that stream can be deleted
-    del_stream(fname, 'teststream')
-
-    assert list_stream(fname) == []
-
-
-@pytest.mark.parametrize("is_dir", [True, False])
-@pytest.mark.parametrize("samba_compat", [True, False])
-def test_get_set_del_stream(request, mount_share, is_dir, samba_compat):
-    assert call('filesystem.statfs', mount_share['mountpoint'])['fstype'] == 'cifs'
-    if is_dir:
-        fname = os.path.join(mount_share['mountpoint'], 'testdirstream')
-        call('filesystem.mkdir', {'path': fname, 'options': {'raise_chmod_error': False}})
-        cleanup = f'rmdir {fname}'
-    else:
-        fname = os.path.join(mount_share['mountpoint'], 'testfilestream')
-        ssh(f'touch {fname}')
-        cleanup = f'rm {fname}'
-
-    try:
-        do_stream_ops(fname, samba_compat)
-    finally:
-        ssh(cleanup)
diff --git a/tests/api2/test_smb_encryption.py b/tests/api2/test_smb_encryption.py
deleted file mode 100644
index 61493cfbef063..0000000000000
--- a/tests/api2/test_smb_encryption.py
+++ /dev/null
@@ -1,133 +0,0 @@
-import os
-import pytest
-
-from contextlib import contextmanager
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-from protocols import smb_connection
-
-SHAREUSER = 'smbuser420'
-PASSWD = 'abcd1234'
-SMB_NAME = 'enc_share'
-
-
-@pytest.fixture(scope='module')
-def smb_setup(request):
-    with dataset('smb-encrypt', data={'share_type': 'SMB'}) as ds:
-        with user({
-            'username': SHAREUSER,
-            'full_name': SHAREUSER,
-            'group_create': True,
-            'password': PASSWD
-        }, get_instance=False):
-            with smb_share(os.path.join('/mnt', ds), SMB_NAME) as s:
-                try:
-                    call('service.start', 'cifs')
-                    yield {'dataset': ds, 'share': s}
-                finally:
-                    call('service.stop', 'cifs')
-
-
-@contextmanager
-def server_encryption(param):
-    call('smb.update', {'encryption': param})
-
-    try:
-        yield
-    finally:
-        call('smb.update', {'encryption': 'DEFAULT'})
-
-
-def test__smb_client_encrypt_default(smb_setup):
-    with smb_connection(
-        share=smb_setup['share']['name'],
-        username=SHAREUSER,
-        password=PASSWD,
-        encryption='DEFAULT'
-    ) as c:
-        # perform basic op to fully initialize SMB session
-        assert c.get_smb_encryption() == 'DEFAULT'
-
-        c.ls('/')
-        smb_status = call('smb.status')[0]
-
-        # check session
-        assert smb_status['encryption']['cipher'] == '-'
-        assert smb_status['encryption']['degree'] == 'none'
-
-        # check share
-        assert smb_status['share_connections'][0]['encryption']['cipher'] == '-'
-        assert smb_status['share_connections'][0]['encryption']['degree'] == 'none'
-
-
-def test__smb_client_encrypt_desired(smb_setup):
-    with smb_connection(
-        share=smb_setup['share']['name'],
-        username=SHAREUSER,
-        password=PASSWD,
-        encryption='DESIRED'
-    ) as c:
-        assert c.get_smb_encryption() == 'DESIRED'
-
-        # perform basic op to fully initialize SMB session
-        c.ls('/')
-        smb_status = call('smb.status')[0]
-
-        # check session
-        assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
-        assert smb_status['encryption']['degree'] == 'partial'
-
-        # check share
-        assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
-        assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
-
-
-def test__smb_client_encrypt_required(smb_setup):
-    with smb_connection(
-        share=smb_setup['share']['name'],
-        username=SHAREUSER,
-        password=PASSWD,
-        encryption='REQUIRED'
-    ) as c:
-        assert c.get_smb_encryption() == 'REQUIRED'
-
-        # perform basic op to fully initialize SMB session
-        c.ls('/')
-        smb_status = call('smb.status')[0]
-
-        # check session
-        assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
-        assert smb_status['encryption']['degree'] == 'partial'
-
-        # check share
-        assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
-        assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
-
-
-@pytest.mark.parametrize('enc_param', ('DESIRED', 'REQUIRED'))
-def test__smb_client_server_encrypt(smb_setup, enc_param):
-    with server_encryption(enc_param):
-        with smb_connection(
-            share=smb_setup['share']['name'],
-            username=SHAREUSER,
-            password=PASSWD,
-            encryption='DEFAULT'
-        ) as c:
-            # check that client credential desired encryption is
-            # set to expected value
-            assert c.get_smb_encryption() == 'DEFAULT'
-
-            # perform basic op to fully initialize SMB session
-            c.ls('/')
-            smb_status = call('smb.status')[0]
-
-            # check session
-            assert smb_status['encryption']['cipher'] == 'AES-128-GCM'
-            assert smb_status['encryption']['degree'] == 'full'
-
-            # check share
-            assert smb_status['share_connections'][0]['encryption']['cipher'] == 'AES-128-GCM'
-            assert smb_status['share_connections'][0]['encryption']['degree'] == 'full'
diff --git a/tests/api2/test_smb_groupmap.py b/tests/api2/test_smb_groupmap.py
deleted file mode 100644
index e914fa933753c..0000000000000
--- a/tests/api2/test_smb_groupmap.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import pytest
-
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.assets.account import group
-
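-# Local (non-builtin) groups map to SIDs whose RID is the datastore id offset by this
-# base value, as the RID assertions below verify.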
-BASE_RID_GROUP = 200000
-
-
-@pytest.mark.parametrize('groupname,expected_memberof,expected_rid', [
-    ('builtin_administrators', 'S-1-5-32-544', 512),
-    ('builtin_guests', 'S-1-5-32-546', 514)
-])
-def test__local_builtin_accounts(groupname, expected_memberof, expected_rid):
-    entry = call('group.query', [['group', '=', groupname]], {'get': True})
-    rid = int(entry['sid'].split('-')[-1])
-    assert rid == expected_rid
-
-    groupmap = call('smb.groupmap_list')
-    assert str(entry['gid']) in groupmap['local_builtins']
-    assert groupmap['local_builtins'][str(entry['gid'])]['sid'] == entry['sid']
-
-    members = call('smb.groupmap_listmem', expected_memberof)
-    assert entry['sid'] in members
-
-
-def test__local_builtin_users_account():
-    entry = call('group.query', [['group', '=', 'builtin_users']], {'get': True})
-
-    rid = int(entry['sid'].split('-')[-1])
-    assert rid == entry['id'] + BASE_RID_GROUP
-
-    members_dom_users = call('smb.groupmap_listmem', 'S-1-5-32-545')
-    assert entry['sid'] in members_dom_users
-
-
-def test__new_group():
-    with group({"name": "group1"}) as g:
-        # Validate GID is being assigned as expected
-        assert g['sid'] is not None
-        rid = int(g['sid'].split('-')[-1])
-        assert rid == g['id'] + BASE_RID_GROUP
-
-        groupmap = call('smb.groupmap_list')
-
-        assert groupmap['local'][str(g['gid'])]['sid'] == g['sid']
-
-        # Validate that disabling SMB removes SID value from query results
-        call('group.update', g['id'], {'smb': False})
-
-        new = call('group.get_instance', g['id'])
-        assert new['sid'] is None
-
-        # Check for presence in group_mapping.tdb
-        groupmap = call('smb.groupmap_list')
-        assert new['gid'] not in groupmap['local']
-
-        # Validate that re-enabling restores SID value
-        call('group.update', g['id'], {'smb': True})
-        new = call('group.get_instance', g['id'])
-        assert new['sid'] == g['sid']
-
-        groupmap = call('smb.groupmap_list')
-        assert str(new['gid']) in groupmap['local']
-
-
-@pytest.mark.parametrize('name,gid,sid', [
-    ('Administrators', 90000001, 'S-1-5-32-544'),
-    ('Users', 90000002, 'S-1-5-32-545'),
-    ('Guests', 90000003, 'S-1-5-32-546')
-])
-def test__builtins(name, gid, sid):
-    builtins = call('smb.groupmap_list')['builtins']
-    assert str(gid) in builtins
diff --git a/tests/api2/test_smb_null_empty_dacl.py b/tests/api2/test_smb_null_empty_dacl.py
deleted file mode 100644
index 130e87af9f1ce..0000000000000
--- a/tests/api2/test_smb_null_empty_dacl.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import json
-import os
-import pytest
-
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.smb import security, smb_connection
-from samba import ntstatus, NTSTATUSError
-
-
-ADV_PERMS_FIELDS = [
-    'READ_DATA', 'WRITE_DATA', 'APPEND_DATA',
-    'READ_NAMED_ATTRS', 'WRITE_NAMED_ATTRS',
-    'EXECUTE',
-    'DELETE_CHILD', 'DELETE',
-    'READ_ATTRIBUTES', 'WRITE_ATTRIBUTES',
-    'READ_ACL', 'WRITE_ACL',
-    'WRITE_OWNER',
-    'SYNCHRONIZE'
-]
-
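-# As the tests below verify, a NULL DACL set over SMB maps to a single everyone@ entry
-# with full control on the ZFS ACL, while an empty DACL (zero ACEs) maps to an
-# everyone@ entry granting nothing.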
-NULL_DACL_PERMS = {'BASIC': 'FULL_CONTROL'}
-EMPTY_DACL_PERMS = {perm: False for perm in ADV_PERMS_FIELDS}
-
-
-@pytest.fixture(scope='function')
-def share():
-    with dataset('null_dacl_test', {'share_type': 'SMB'}) as ds:
-        with smb_share(f'/mnt/{ds}', 'DACL_TEST_SHARE') as s:
-            yield {'ds': ds, 'share': s}
-
-
-def set_special_acl(path, special_acl_type):
-    match special_acl_type:
-        case 'NULL_DACL':
-            permset = NULL_DACL_PERMS
-        case 'EMPTY_DACL':
-            permset = EMPTY_DACL_PERMS
-        case _:
-            raise TypeError(f'[EDOOFUS]: {special_acl_type} unexpected special ACL type')
-
-    payload = json.dumps({'acl': [{
-        'tag': 'everyone@',
-        'id': -1,
-        'type': 'ALLOW',
-        'perms': permset,
-        'flags': {'BASIC': 'NOINHERIT'},
-    }]})
-    ssh(f'touch {path}')
-
-    # Use SSH to write to avoid middleware ACL normalization and validation
-    # that prevents writing these specific ACLs.
-    ssh(f"nfs4xdr_setfacl -j '{payload}' {path}")
-
-
-def test_null_dacl_set(unprivileged_user_fixture, share):
-    """ verify that setting NULL DACL results in expected ZFS ACL """
-    with smb_connection(
-        share=share['share']['name'],
-        username=unprivileged_user_fixture.username,
-        password=unprivileged_user_fixture.password,
-    ) as c:
-        fh = c.create_file('test_null_dacl', 'w')
-        current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
-        current_sd.dacl = None
-        c.set_sd(fh, current_sd, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
-
-        new_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
-        assert new_sd.dacl is None
-
-        theacl = call('filesystem.getacl', os.path.join(share['share']['path'], 'test_null_dacl'))
-        assert len(theacl['acl']) == 1
-
-        assert theacl['acl'][0]['perms'] == NULL_DACL_PERMS
-        assert theacl['acl'][0]['type'] == 'ALLOW'
-        assert theacl['acl'][0]['tag'] == 'everyone@'
-
-
-def test_null_dacl_functional(unprivileged_user_fixture, share):
-    """ verify that NULL DACL grants write privileges """
-    testfile = os.path.join(share['share']['path'], 'test_null_dacl_write')
-    set_special_acl(testfile, 'NULL_DACL')
-    data = b'canary'
-
-    with smb_connection(
-        share=share['share']['name'],
-        username=unprivileged_user_fixture.username,
-        password=unprivileged_user_fixture.password,
-    ) as c:
-        fh = c.create_file('test_null_dacl_write', 'w')
-        current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
-        assert current_sd.dacl is None
-
-        c.write(fh, data)
-        assert c.read(fh, 0, cnt=len(data)) == data
-
-
-def test_empty_dacl_set(unprivileged_user_fixture, share):
-    """ verify that setting empty DACL results in expected ZFS ACL """
-    with smb_connection(
-        share=share['share']['name'],
-        username=unprivileged_user_fixture.username,
-        password=unprivileged_user_fixture.password,
-    ) as c:
-        fh = c.create_file('test_empty_dacl', 'w')
-        current_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP)
-        current_sd.dacl = security.acl()
-        c.set_sd(fh, current_sd, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
-
-        new_sd = c.get_sd(fh, security.SECINFO_OWNER | security.SECINFO_GROUP | security.SECINFO_DACL)
-        assert new_sd.dacl.num_aces == 0
-
-        theacl = call('filesystem.getacl', os.path.join(share['share']['path'], 'test_empty_dacl'))
-        assert len(theacl['acl']) == 1
-
-        assert theacl['acl'][0]['perms'] == EMPTY_DACL_PERMS
-        assert theacl['acl'][0]['type'] == 'ALLOW'
-        assert theacl['acl'][0]['tag'] == 'everyone@'
-
-
-def test_empty_dacl_functional(unprivileged_user_fixture, share):
-    testfile = os.path.join(share['share']['path'], 'test_empty_dacl_write')
-    set_special_acl(testfile, 'EMPTY_DACL')
-
-    with smb_connection(
-        share=share['share']['name'],
-        username=unprivileged_user_fixture.username,
-        password=unprivileged_user_fixture.password,
-    ) as c:
-        # File has empty ACL and is not owned by this user
-        with pytest.raises(NTSTATUSError) as nt_err:
-            c.create_file('test_empty_dacl_write', 'w')
-
-        assert nt_err.value.args[0] == ntstatus.NT_STATUS_ACCESS_DENIED
diff --git a/tests/api2/test_smb_share_crud_roles.py b/tests/api2/test_smb_share_crud_roles.py
deleted file mode 100644
index 4a43998b8c88e..0000000000000
--- a/tests/api2/test_smb_share_crud_roles.py
+++ /dev/null
@@ -1,78 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.roles import common_checks
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope='module')
-def create_dataset():
-    with dataset('smb_roles_test') as ds:
-        yield ds
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_SMB_READ"])
-def test_read_role_can_read(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.smb.query", role, True, valid_role_exception=False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_READ", "SHARING_SMB_READ"])
-def test_read_role_cant_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.smb.create", role, False)
-    common_checks(unprivileged_user_fixture, "sharing.smb.update", role, False)
-    common_checks(unprivileged_user_fixture, "sharing.smb.delete", role, False)
-
-    common_checks(unprivileged_user_fixture, "sharing.smb.getacl", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.smb.setacl", role, False)
-    common_checks(unprivileged_user_fixture, "smb.status", role, False)
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"])
-def test_write_role_can_write(unprivileged_user_fixture, role):
-    common_checks(unprivileged_user_fixture, "sharing.smb.create", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.smb.update", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.smb.delete", role, True)
-
-    common_checks(unprivileged_user_fixture, "sharing.smb.getacl", role, True)
-    common_checks(unprivileged_user_fixture, "sharing.smb.setacl", role, True)
-    common_checks(unprivileged_user_fixture, "smb.status", role, True, valid_role_exception=False)
-
-    common_checks(
-        unprivileged_user_fixture, "service.start", role, True, method_args=["cifs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.restart", role, True, method_args=["cifs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.reload", role, True, method_args=["cifs"], valid_role_exception=False
-    )
-    common_checks(
-        unprivileged_user_fixture, "service.stop", role, True, method_args=["cifs"], valid_role_exception=False
-    )
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"])
-def test_auxsmbconf_rejected_create(create_dataset, role):
-    share = None
-    with unprivileged_user_client(roles=[role]) as c:
-        with pytest.raises(ValidationErrors) as ve:
-            try:
-                share = c.call('sharing.smb.create', {
-                    'name': 'FAIL',
-                    'path': f'/mnt/{create_dataset}',
-                    'auxsmbconf': 'test:param = CANARY'
-                })
-            finally:
-                if share:
-                    call('sharing.smb.delete', share['id'])
-
-
-@pytest.mark.parametrize("role", ["SHARING_WRITE", "SHARING_SMB_WRITE"])
-def test_auxsmbconf_rejected_update(create_dataset, role):
-    with smb_share(f'/mnt/{create_dataset}', 'FAIL') as share:
-        with unprivileged_user_client(roles=[role]) as c:
-            with pytest.raises(ValidationErrors):
-                c.call('sharing.smb.update', share['id'], {'auxsmbconf': 'test:param = Bob'})
diff --git a/tests/api2/test_snapshot_count_alert.py b/tests/api2/test_snapshot_count_alert.py
deleted file mode 100644
index 7eca0422213bd..0000000000000
--- a/tests/api2/test_snapshot_count_alert.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import pytest
-from pytest_dependency import depends
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.smb import smb_share
-from middlewared.test.integration.utils import call, mock
-from time import sleep
-
-
-DATASET_NAME = "snapshot_count"
-NUM_SNAPSHOTS = 10
-
-
-def test_snapshot_total_count_alert(request):
-    with dataset(DATASET_NAME) as ds:
-        base = call("zfs.snapshot.query", [], {"count": True})
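-        # Mock the threshold at exactly base + NUM_SNAPSHOTS so creating NUM_SNAPSHOTS
-        # snapshots stays within the limit and one additional snapshot trips the alert.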
-        with mock("pool.snapshottask.max_total_count", return_value=base + NUM_SNAPSHOTS):
-            for i in range(NUM_SNAPSHOTS):
-                call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
-
-            assert call("alert.run_source", "SnapshotCount") == []
-            # snapshots_changed ZFS dataset property has 1 second resolution
-            sleep(1)
-
-            call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{NUM_SNAPSHOTS}"})
-
-            alert = call("alert.run_source", "SnapshotCount")[0]
-            assert alert["text"] % alert["args"] == (
-                f"Your system has more snapshots ({base + NUM_SNAPSHOTS + 1}) than recommended ({base + NUM_SNAPSHOTS}"
-                "). Performance or functionality might degrade."
-            )
-
-
-def test_snapshot_count_alert(request):
-    with (
-        dataset(DATASET_NAME) as ds,
-        smb_share(f"/mnt/{ds}", DATASET_NAME),
-        mock("pool.snapshottask.max_count", return_value=NUM_SNAPSHOTS)
-    ):
-        for i in range(NUM_SNAPSHOTS):
-            call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{i}"})
-
-        assert call("alert.run_source", "SnapshotCount") == []
-        # snapshots_changed ZFS dataset property has 1 second resolution
-        sleep(1)
-
-        call("zfs.snapshot.create", {"dataset": ds, "name": f"snap-{NUM_SNAPSHOTS}"})
-
-        alert = call("alert.run_source", "SnapshotCount")[0]
-        assert alert["text"] % alert["args"] == (
-            f"SMB share {ds} has more snapshots ({NUM_SNAPSHOTS + 1}) than recommended ({NUM_SNAPSHOTS}). File "
-            "Explorer may not display all snapshots in the Previous Versions tab."
-        )
diff --git a/tests/api2/test_snapshot_query.py b/tests/api2/test_snapshot_query.py
deleted file mode 100644
index a1245b5371e3f..0000000000000
--- a/tests/api2/test_snapshot_query.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.pool import dataset, pool, another_pool
-from middlewared.test.integration.utils import call
-
-
-@pytest.fixture(scope="module")
-def fixture1():
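-    # Build identically named dataset trees on the primary pool and on a second pool
-    # (named "test" here), then snapshot both recursively so results can be filtered per pool.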
-    with another_pool():
-        with dataset("test"):
-            with dataset("test/test1"):
-                with dataset("test/test2"):
-                    with dataset("test", pool="test"):
-                        with dataset("test/test1", pool="test"):
-                            with dataset("test/test2", pool="test"):
-                                call(
-                                    "zfs.snapshot.create",
-                                    {"dataset": f"{pool}/test", "name": "snap-1", "recursive": True},
-                                )
-                                call(
-                                    "zfs.snapshot.create",
-                                    {"dataset": f"{pool}/test", "name": "snap-2", "recursive": True},
-                                )
-                                call(
-                                    "zfs.snapshot.create",
-                                    {"dataset": "test/test", "name": "snap-1", "recursive": True},
-                                )
-                                call(
-                                    "zfs.snapshot.create",
-                                    {"dataset": "test/test", "name": "snap-2", "recursive": True},
-                                )
-                                yield
-
-
-def test_query_all_names(fixture1):
-    names = {
-        snapshot["name"]
-        for snapshot in call("zfs.snapshot.query", [], {"select": ["name"]})
-    }
-    assert {f"{pool}/test@snap-1", f"{pool}/test@snap-2", f"{pool}/test/test1@snap-1", f"{pool}/test/test1@snap-2",
-            f"{pool}/test/test2@snap-1", f"{pool}/test/test2@snap-2",
-            f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1", f"test/test/test1@snap-2",
-            f"test/test/test2@snap-1", f"test/test/test2@snap-2"}.issubset(names)
-
-
-@pytest.mark.parametrize("filters,names", [
-    ([["pool", "=", "test"]], {f"test/test@snap-1", f"test/test@snap-2", f"test/test/test1@snap-1",
-                               f"test/test/test1@snap-2", f"test/test/test2@snap-1", f"test/test/test2@snap-2"}),
-    ([["dataset", "=", f"{pool}/test"]], {f"{pool}/test@snap-1", f"{pool}/test@snap-2"}),
-    ([["dataset", "in", [f"{pool}/test/test1", "test/test/test2"]]], {f"{pool}/test/test1@snap-1",
-                                                                      f"{pool}/test/test1@snap-2",
-                                                                      f"test/test/test2@snap-1",
-                                                                      f"test/test/test2@snap-2"}),
-])
-def test_query_names_by_pool_or_dataset(fixture1, filters, names):
-    assert {
-        snapshot["name"]
-        for snapshot in call("zfs.snapshot.query", filters, {"select": ["name"]})
-    } == names
diff --git a/tests/api2/test_snapshot_task.py b/tests/api2/test_snapshot_task.py
deleted file mode 100644
index 210651f12b837..0000000000000
--- a/tests/api2/test_snapshot_task.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import pytest
-
-from middlewared.service_exception import InstanceNotFound
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.snapshot_task import snapshot_task
-from middlewared.test.integration.utils import call
-
-import sys
-import os
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-
-def test_snapshot_task_is_not_deleted_when_deleting_a_child_dataset():
-    with dataset("parent") as parent:
-        with dataset("parent/child") as child:
-            with snapshot_task({
-                "dataset": parent,
-                "recursive": True,
-                "lifetime_value": 1,
-                "lifetime_unit": "DAY",
-                "naming_schema": "%Y%m%d%H%M",
-            }) as t:
-                call("pool.dataset.delete", child)
-
-                assert call("pool.snapshottask.get_instance", t["id"])
-
-
-def test_snapshot_task_is_deleted_when_deleting_a_parent_dataset():
-    with dataset("parent") as parent:
-        with dataset("parent/child") as child:
-            with snapshot_task({
-                "dataset": child,
-                "recursive": True,
-                "lifetime_value": 1,
-                "lifetime_unit": "DAY",
-                "naming_schema": "%Y%m%d%H%M",
-            }) as t:
-                call("pool.dataset.delete", parent, {"recursive": True})
-
-                with pytest.raises(InstanceNotFound):
-                    assert call("pool.snapshottask.get_instance", t["id"])
diff --git a/tests/api2/test_snapshot_task_retention.py b/tests/api2/test_snapshot_task_retention.py
deleted file mode 100644
index 72dec238e940d..0000000000000
--- a/tests/api2/test_snapshot_task_retention.py
+++ /dev/null
@@ -1,112 +0,0 @@
-from datetime import datetime
-from unittest.mock import ANY
-
-import pytz
-
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.snapshot_task import snapshot_task
-from middlewared.test.integration.utils import assert_creates_job, call
-
-
-def test_change_retention():
-    tz = pytz.timezone(call("system.info")["timezone"])
-
-    with dataset("snapshottask-retention-test") as ds:
-        call("zettarepl.load_removal_dates")
-
-        with snapshot_task({
-            "dataset": ds,
-            "recursive": True,
-            "exclude": [],
-            "lifetime_value": 10,
-            "lifetime_unit": "YEAR",
-            "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
-            "schedule": {
-                "minute": "*",
-            },
-        }) as task:
-            call("zfs.snapshot.create", {
-                "dataset": ds,
-                "name": "auto-2021-04-12-06-30-1y",
-            })
-
-            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
-                          {"get": True, "extra": {"retention": True}})
-            assert result["retention"] == {
-                "datetime": ANY,
-                "source": "periodic_snapshot_task",
-                "periodic_snapshot_task_id": task["id"],
-            }
-            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))
-
-            result = call("pool.snapshottask.update_will_change_retention_for", task["id"], {
-                "naming_schema": "auto-%Y-%m-%d-%H-%M-365d",
-            })
-            assert result == {
-                ds: ["auto-2021-04-12-06-30-1y"],
-            }
-
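-            # Updating the task with fixate_removal_date=True should spawn a job that
-            # stamps the removal date onto affected snapshots as a ZFS user property.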
-            with assert_creates_job("pool.snapshottask.fixate_removal_date") as job:
-                call("pool.snapshottask.update", task["id"], {
-                    "naming_schema": "auto-%Y-%m-%d-%H-%M-365d",
-                    "fixate_removal_date": True,
-                })
-
-            call("core.job_wait", job.id, job=True)
-
-            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
-                          {"get": True, "extra": {"retention": True}})
-            properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")]
-            assert properties, result["properties"]
-            assert properties[0]["value"] == "2031-04-10T06:30:00"
-            assert result["retention"] == {
-                "datetime": ANY,
-                "source": "property",
-            }
-            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))
-
-
-def test_delete_retention():
-    tz = pytz.timezone(call("system.info")["timezone"])
-
-    with dataset("snapshottask-retention-test-2") as ds:
-        call("zettarepl.load_removal_dates")
-
-        with snapshot_task({
-            "dataset": ds,
-            "recursive": True,
-            "exclude": [],
-            "lifetime_value": 10,
-            "lifetime_unit": "YEAR",
-            "naming_schema": "auto-%Y-%m-%d-%H-%M-1y",
-            "schedule": {
-                "minute": "*",
-            },
-        }) as task:
-            call("zfs.snapshot.create", {
-                "dataset": ds,
-                "name": "auto-2021-04-12-06-30-1y",
-            })
-
-            result = call("pool.snapshottask.delete_will_change_retention_for", task["id"])
-            assert result == {
-                ds: ["auto-2021-04-12-06-30-1y"],
-            }
-
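-            # Deleting the task with fixate_removal_date=True should likewise fixate the
-            # removal date as a property before the task is removed.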
-            with assert_creates_job("pool.snapshottask.fixate_removal_date") as job:
-                call("pool.snapshottask.delete", task["id"], {
-                    "fixate_removal_date": True,
-                })
-
-            call("core.job_wait", job.id, job=True)
-
-            result = call("zfs.snapshot.query", [["id", "=", f"{ds}@auto-2021-04-12-06-30-1y"]],
-                          {"get": True, "extra": {"retention": True}})
-            properties = [v for k, v in result["properties"].items() if k.startswith("org.truenas:destroy_at_")]
-            assert properties, result["properties"]
-            assert properties[0]["value"] == "2031-04-10T06:30:00"
-            assert result["retention"] == {
-                "datetime": ANY,
-                "source": "property",
-            }
-            assert result["retention"]["datetime"].astimezone(tz) == tz.localize(datetime(2031, 4, 10, 6, 30))
diff --git a/tests/api2/test_snapshots.py b/tests/api2/test_snapshots.py
deleted file mode 100644
index 4cff87b341bed..0000000000000
--- a/tests/api2/test_snapshots.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import errno
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-
-def common_min_max_txg_snapshot_test(test_min_txg=False, test_max_txg=False):
-    assert test_min_txg or test_max_txg
-
-    with dataset('test') as test_dataset:
-        created_snaps = []
-        total_snaps = 20
-        for i in range(total_snaps):
-            created_snaps.append(int(call(
-                'zfs.snapshot.create', {'dataset': test_dataset, 'name': f'snap_{i}'}
-            )['properties']['createtxg']['value']))
-
-        assert call('zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True}) == len(created_snaps)
-
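-        # Progressively narrow the createtxg window and confirm the query count matches
-        # the number of snapshots falling inside it.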
-        for i in range(total_snaps // 2 - 1):
-            new_list = created_snaps
-            extra_args = {}
-            if test_min_txg:
-                new_list = created_snaps[i:]
-                extra_args['min_txg'] = new_list[0]
-            if test_max_txg:
-                new_list = new_list[:len(new_list) // 2]
-                extra_args['max_txg'] = new_list[-1]
-
-            assert call(
-                'zfs.snapshot.query', [['dataset', '=', test_dataset]], {'count': True, 'extra': extra_args}
-            ) == len(new_list)
-
-
-def test_min_txg_snapshot_query():
-    common_min_max_txg_snapshot_test(True, False)
-
-
-def test_max_txg_snapshot_query():
-    common_min_max_txg_snapshot_test(False, True)
-
-
-def test_min_max_txg_snapshot_query():
-    common_min_max_txg_snapshot_test(True, True)
-
-
-def test_already_exists():
-    with dataset('test') as test_dataset:
-        call('zfs.snapshot.create', {'dataset': test_dataset, 'name': 'snap'})
-        with pytest.raises(CallError) as ve:
-            call('zfs.snapshot.create', {'dataset': test_dataset, 'name': 'snap'})
-
-        assert ve.value.errno == errno.EEXIST
diff --git a/tests/api2/test_snmp_agent.py b/tests/api2/test_snmp_agent.py
deleted file mode 100644
index 40a67dc2addf1..0000000000000
--- a/tests/api2/test_snmp_agent.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import re
-import subprocess
-import tempfile
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import call, host, ssh
-
-
-@pytest.fixture()
-def snmpd_running():
-    call("service.start", "snmp")
-    time.sleep(2)
-    yield
-
-
-def test_truenas_mib_elements(snmpd_running):
-    mib_file = "/usr/local/share/snmp/mibs/TRUENAS-MIB.txt"
-    with tempfile.NamedTemporaryFile(mode='w') as f:
-        lines = ssh(f'cat {mib_file}')
-        assert lines
-
-        f.write(lines)
-        f.flush()
-
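-        # Walk the TRUENAS-MIB subtree (enterprise OID 1.3.6.1.4.1.50536) using the MIB
-        # file copied from the NAS so that symbolic names resolve locally.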
-        snmp = subprocess.run(
-            f"snmpwalk -v2c -c public -m {f.name} {host().ip} "
-            "1.3.6.1.4.1.50536",
-            shell=True,
-            capture_output=True,
-            text=True,
-        )
-        assert snmp.returncode == 0, snmp.stderr
-        assert "TRUENAS-MIB::zpoolName.1 = STRING: boot-pool\n" in snmp.stdout
-        assert re.search(
-            r"^TRUENAS-MIB::zfsArcSize\.0 = Gauge32: ([1-9][0-9]+)\n", snmp.stdout, re.MULTILINE
-        ), snmp.stdout
diff --git a/tests/api2/test_staticroutes.py b/tests/api2/test_staticroutes.py
deleted file mode 100644
index fac14ca4bc3f6..0000000000000
--- a/tests/api2/test_staticroutes.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors
-from middlewared.test.integration.utils import call, ssh
-
-ROUTE = {
-    "destination": "127.1.1.1",
-    "gateway": "127.0.0.1",
-    "description": "Test Route",
-}
-BAD_ROUTE = {"destination": "fe80:aaaa:bbbb:cccc::1/64", "gateway": ROUTE["gateway"]}
-
-
-def test_staticroute():
-    """
-    1. try to create invalid route
-    2. create valid route
-    3. validate route was added to OS
-    4. try to update valid route with invalid data
-    5. delete route
-    6. validate route was removed from OS
-    """
-    # try to create bad route
-    with pytest.raises(ValidationErrors):
-        call("staticroute.create", BAD_ROUTE)
-
-    # now create valid one
-    id_ = call("staticroute.create", ROUTE)["id"]
-
-    # validate query
-    qry = call("staticroute.query", [["id", "=", id_]], {"get": True})
-    assert ROUTE["destination"] in qry["destination"]
-    assert ROUTE["gateway"] == qry["gateway"]
-    assert ROUTE["description"] == qry["description"]
-
-    # validate route was added to OS
-    results = ssh(f"ip route show {ROUTE['destination']}", complete_response=True)
-    assert f"{ROUTE['destination']} via {ROUTE['gateway']}" in results["stdout"]
-
-    # update it with bad data
-    with pytest.raises(ValidationErrors):
-        call("staticroute.update", id_, {"destination": BAD_ROUTE["destination"]})
-
-    # now delete
-    assert call("staticroute.delete", id_)
-    assert not call("staticroute.query", [["id", "=", id_]])
-
-    # validate route was removed from OS
-    results = ssh(
-        f"ip route show {ROUTE['destination']}", complete_response=True, check=False
-    )
-    assert ROUTE["destination"] not in results["stdout"]
diff --git a/tests/api2/test_system_advanced.py b/tests/api2/test_system_advanced.py
deleted file mode 100644
index 3f0351dd0df26..0000000000000
--- a/tests/api2/test_system_advanced.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import pytest
-
-from middlewared.service_exception import ValidationErrors, ValidationError
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.mark.parametrize(
-    'key,value,grep_file,sshd_config_cmd,validation_error', [
-        ('motd', 'TrueNAS Message Of The Day', '/etc/motd', None, ''),
-        ('login_banner', 'TrueNAS Login Banner', '/etc/login_banner', 'grep Banner /etc/ssh/sshd_config', ''),
-        ('kernel_extra_options', 'zfs_arc_min=21474836480', None, None, ''),
-        ('kernel_extra_options', '', None, None, ''),
-        ('kernel_extra_options', 'zfs_arc_min=<21474836480>', None, None, 'Invalid syntax'),
-    ],
-    ids=[
-        'Test MOTD',
-        'Test Login Banner',
-        'Test Valid Kernel Extra Options 1',
-        'Test Valid Kernel Extra Options 2',
-        'Test Invalid Kernel Extra Options 1',
-    ],
-)
-def test_(key, value, grep_file, sshd_config_cmd, validation_error):
-    if not validation_error:
-        call('system.advanced.update', {key: value})
-        assert call('system.advanced.config')[key] == value
-        if grep_file is not None:
-            assert ssh(f'grep "{value}" {grep_file}', complete_response=True)['result']
-        if sshd_config_cmd is not None:
-            assert ssh(sshd_config_cmd, complete_response=True)['result']
-    else:
-        with pytest.raises(ValidationErrors) as ve:
-            call('system.advanced.update', {key: value})
-        assert ve.value.errors == [ValidationError(key, validation_error)]
diff --git a/tests/api2/test_system_dataset.py b/tests/api2/test_system_dataset.py
deleted file mode 100644
index d635c6ab092bf..0000000000000
--- a/tests/api2/test_system_dataset.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import errno
-import os
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, pool
-
-
-PASSPHRASE = 'passphrase'
-
-
-@pytest.fixture(scope="module")
-def passphrase_encrypted_pool_session():
-    with another_pool({"encryption": True, "encryption_options": {"passphrase": PASSPHRASE}}) as p:
-        yield p["name"]
-
-
-@pytest.fixture(scope="function")
-def passphrase_encrypted_pool(passphrase_encrypted_pool_session):
-    config = call("systemdataset.config")
-    assert config["pool"] == pool
-
-    try:
-        call("pool.dataset.delete", passphrase_encrypted_pool_session, {"recursive": True})
-    except CallError as e:
-        if e.errno != errno.ENOENT:
-            raise
-
-    # If root dataset is locked, let's unlock it here
-    # It can be locked if some test locks it but does not unlock it later on, and we
-    # should have a clean slate whenever we are testing with this pool/root dataset.
-    if call("pool.dataset.get_instance", passphrase_encrypted_pool_session)["locked"]:
-        call("pool.dataset.unlock", passphrase_encrypted_pool_session, {
-            "datasets": [{"name": passphrase_encrypted_pool_session, "passphrase": PASSPHRASE}],
-        })
-
-    yield passphrase_encrypted_pool_session
-
-
-@pytest.mark.parametrize("lock", [False, True])
-def test_migrate_to_a_pool_with_passphrase_encrypted_root_dataset(passphrase_encrypted_pool, lock):
-    if lock:
-        call("pool.dataset.lock", passphrase_encrypted_pool, job=True)
-
-    assert passphrase_encrypted_pool in call("systemdataset.pool_choices")
-
-    call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True)
-
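-    # The system dataset is expected to be created unencrypted even though the target
-    # pool's root dataset is passphrase-encrypted.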
-    ds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system")
-    assert ds["properties"]["encryption"]["value"] == "off"
-
-    call("systemdataset.update", {"pool": pool}, job=True)
-
-
-def test_lock_passphrase_encrypted_pool_with_system_dataset(passphrase_encrypted_pool):
-    call("systemdataset.update", {"pool": passphrase_encrypted_pool}, job=True)
-
-    call("pool.dataset.lock", passphrase_encrypted_pool, job=True)
-
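-    # Locking the pool must not unmount the system dataset.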
-    ds = call("zfs.dataset.get_instance", f"{passphrase_encrypted_pool}/.system")
-    assert ds["properties"]["mounted"]["value"] == "yes"
-
-    call("systemdataset.update", {"pool": pool}, job=True)
-
-
-def test_system_dataset_mountpoints():
-    system_config = call("systemdataset.config")
-    for system_dataset_spec in call(
-        "systemdataset.get_system_dataset_spec", system_config["pool"], system_config["uuid"]
-    ):
-        mount_point = system_dataset_spec.get("mountpoint") or os.path.join(
-            system_config["path"], os.path.basename(system_dataset_spec["name"])
-        )
-
-        ds_stats = call("filesystem.stat", mount_point)
-        assert ds_stats["uid"] == system_dataset_spec["chown_config"]["uid"]
-        assert ds_stats["gid"] == system_dataset_spec["chown_config"]["gid"]
-        assert ds_stats["mode"] & 0o777 == system_dataset_spec["chown_config"]["mode"]
-
-
-def test_netdata_post_mount_action():
-    # We rely on this to make sure system dataset post mount actions are working as intended
-    ds_stats = call("filesystem.stat", "/var/db/system/netdata/ix_state")
-    assert ds_stats["uid"] == 999, ds_stats
-    assert ds_stats["gid"] == 997, ds_stats
-    assert ds_stats["mode"] & 0o777 == 0o755, ds_stats
diff --git a/tests/api2/test_system_general.py b/tests/api2/test_system_general.py
deleted file mode 100644
index 1c9b97a7755ab..0000000000000
--- a/tests/api2/test_system_general.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from middlewared.test.integration.utils import call
-
-TIMEZONE = "America/New_York"
-
-def test_check_system_set_time():
-    """
-    This test intentionally slews our clock to be off
-    by 300 seconds and then verifies that the new time was applied
-    """
-    results = call("system.info")
-
-    # Convert to seconds
-    datetime = int(results["datetime"].timestamp())
-
-    # hop 300 seconds into the past
-    target = datetime - 300
-    call("system.set_time", int(target))
-
-    results = call("system.info")
-    datetime2 = int(results["datetime"].timestamp())
-
-    # This is a fudge-factor because NTP will start working
-    # pretty quickly to correct the slew.
-    assert abs(target - datetime2) < 60
-
-
-def test_setting_timezone():
-    assert TIMEZONE in call("system.general.timezone_choices")
-    call("system.general.update", {"timezone": TIMEZONE})
-    assert call("system.general.config")["timezone"] == TIMEZONE
diff --git a/tests/api2/test_system_general_ui_allowlist.py b/tests/api2/test_system_general_ui_allowlist.py
deleted file mode 100644
index b24a01b9e9268..0000000000000
--- a/tests/api2/test_system_general_ui_allowlist.py
+++ /dev/null
@@ -1,84 +0,0 @@
-import socket
-import time
-
-import requests
-import websocket
-
-from middlewared.test.integration.utils import call, host, mock, ssh, url, websocket_url
-
-
-def test_system_general_ui_allowlist():
-    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-    s.connect((host().ip, 1))  # connect() for UDP doesn't send packets
-    local_ip = s.getsockname()[0]
-
-    with mock("vm.query", return_value=[
-        {"id": 1, "name": ""},
-    ]):
-        with mock("vm.device.query", return_value=[
-            {"id": 1, "attributes": {"bind": "127.0.0.1", "port": 1, "web_port": 1}, "vm": 1}
-        ]):
-            try:
-                protected_endpoints = (
-                    "/_download",
-                    "/_upload",
-                    "/_plugins",
-                    "/api/docs",
-                    "/api/v2.0",
-                    "/progress",
-                    "/vm/display/1",
-                )
-                protected_ws_endpoints = (
-                    ("/websocket", '{"msg": "connect", "version": "1"}'),
-                    ("/websocket/shell", '{"token": "invalid"}'),
-                )
-
-                # Ensure we are testing endpoints that do not give 403 by default
-                for endpoint in protected_endpoints:
-                    r = requests.get(url() + endpoint, timeout=10)
-                    assert r.status_code != 403
-                for endpoint, message in protected_ws_endpoints:
-                    ws = websocket.create_connection(websocket_url() + endpoint)
-                    ws.send(message)
-                    resp_opcode, msg = ws.recv_data()
-                    assert resp_opcode == 1, msg
-
-                # Set `ui_allowlist` to IP we are using
-                call("system.general.update", {"ui_allowlist":  [local_ip]})
-                call("system.general.ui_restart", 0)
-                time.sleep(10)
-
-                # Check everything still works
-                for endpoint in protected_endpoints:
-                    r = requests.get(url() + endpoint, timeout=10)
-                    assert r.status_code != 403
-                for endpoint, message in protected_ws_endpoints:
-                    ws = websocket.create_connection(websocket_url() + endpoint)
-                    ws.send(message)
-                    resp_opcode, msg = ws.recv_data()
-                    assert resp_opcode == 1, msg
-
-                # Set it to an invalid IP
-                call("system.general.update", {"ui_allowlist": ["8.8.8.8"]})
-                call("system.general.ui_restart", 0)
-                time.sleep(10)
-
-                # Ensure we are still able to open the UI
-                r = requests.get(url(), timeout=10)
-                assert r.status_code == 200
-
-                # Ensure that we can't access API
-                for endpoint in protected_endpoints:
-                    r = requests.get(url() + endpoint, timeout=10)
-                    assert r.status_code == 403, (endpoint, r.text)
-                for endpoint, message in protected_ws_endpoints:
-                    ws = websocket.create_connection(websocket_url() + endpoint)
-                    ws.send(message)
-                    resp_opcode, msg = ws.recv_data()
-                    assert resp_opcode == 8, msg
-                    assert msg[2:].decode("utf-8") == "You are not allowed to access this resource"
-            finally:
-                # We are not allowed to access API, bring things back to normal via SSH
-                ssh("midclt call system.general.update '{\"ui_allowlist\": []}'")
-                ssh("midclt call system.general.ui_restart 0")
-                time.sleep(10)
diff --git a/tests/api2/test_system_general_ui_rollback.py b/tests/api2/test_system_general_ui_rollback.py
deleted file mode 100644
index 98b80cf7155d3..0000000000000
--- a/tests/api2/test_system_general_ui_rollback.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import time
-from contextlib import contextmanager
-
-from middlewared.test.integration.utils import call, client, ssh
-from middlewared.test.integration.utils.client import truenas_server
-
-ROLLBACK = 20
-UI_DELAY = 3
-ORIG_PORT = 80
-NEW_PORT = 81
-
-
-def fallback_ui_fix():
-    """Fix the UI port settings using SSH in case an
-    unexpected failure is met or we just want to reset
-    our changes"""
-    ssh(f"midclt call system.general.update '{{\"ui_port\": {ORIG_PORT}}}'")
-    ssh("midclt call system.general.ui_restart 0")
-    time.sleep(5)
-
-
-@contextmanager
-def client_with_timeout(host_ip=None, tries=30):
-    for _ in range(tries):
-        try:
-            with client(host_ip=host_ip) as c:
-                assert c.call("core.ping") == "pong"
-                yield c
-                break
-        except ConnectionRefusedError:
-            time.sleep(1)
-    else:
-        assert False, "Could not connect to client."
-
-
-def test_system_general_ui_rollback():
-    """This tests the following:
-        1. change the port the nginx service binds to (our UI)
-        2. ensure communication with the API on the original port fails
-        3. ensure communication with the API on the new port succeeds
-        4. check the time left before the changes are rolled back
-        5. sleep that amount of time (plus a few seconds for a buffer)
-        6. ensure communication with the API on the original port succeeds
-        7. if any above steps fail, revert the UI port settings via ssh"""
-    try:
-        # Step 1
-        call(
-            "system.general.update",
-            {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY}
-        )
-
-        # Step 2
-        try:
-            assert call("core.ping") != "pong"
-        except Exception:
-            pass
-
-        # Step 3
-        with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c:
-            rollback_left = c.call("system.general.checkin_waiting")
-            # Step 4
-            assert rollback_left < ROLLBACK
-
-        # Step 5
-        time.sleep(rollback_left + 5)
-        # Step 6
-        assert call("core.ping") == "pong"
-    except Exception:
-        # Step 7
-        fallback_ui_fix()
-        raise
-
-
-def test_system_general_ui_checkin():
-    """This tests the following:
-        1. change the port the nginx service binds to (our UI)
-        2. immediately checkin the UI port changes
-        3. ensure we don't have a checkin pending
-        4. revert any UI port settings via ssh"""
-    try:
-        # Step 1
-        call(
-            "system.general.update",
-            {"ui_port": NEW_PORT, "rollback_timeout": ROLLBACK, "ui_restart_delay": UI_DELAY}
-        )
-
-        # connect on the new port
-        with client_with_timeout(host_ip=f"{truenas_server.ip}:{NEW_PORT}") as c:
-            # Step 2
-            c.call("system.general.checkin")
-            # Step 3
-            assert c.call("system.general.checkin_waiting") is None
-    finally:
-        # Step 4
-        fallback_ui_fix()
diff --git a/tests/api2/test_system_lifetime.py b/tests/api2/test_system_lifetime.py
deleted file mode 100644
index a1bd282279435..0000000000000
--- a/tests/api2/test_system_lifetime.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import time
-
-import pytest
-
-from middlewared.test.integration.utils import call
-
-from auto_config import ha
-
-
-@pytest.mark.skipif(
-    ha,
-    reason="Cannot be tested on a HA system since rebooting this node will just fail over to another node",
-)
-def test_system_reboot():
-    boot_id = call("system.boot_id")
-
-    call("system.reboot", "Integration test")
-
-    for i in range(180):
-        try:
-            new_boot_id = call("system.boot_id")
-        except Exception:
-            pass
-        else:
-            if new_boot_id != boot_id:
-                break
-
-        time.sleep(1)
-    else:
-        assert False, "System did not reboot"
-
-    audit = call("audit.query", {
-        "services": ["MIDDLEWARE"],
-        "query-filters": [
-            ["event", "=", "REBOOT"],
-        ],
-    })
-    assert audit[-1]["event_data"] == {"reason": "Integration test"}
diff --git a/tests/api2/test_system_settings_roles.py b/tests/api2/test_system_settings_roles.py
deleted file mode 100644
index 48ead698987a5..0000000000000
--- a/tests/api2/test_system_settings_roles.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('role,endpoint,payload,should_work,valid_role_exception,is_return_type_none', [
-    ('SYSTEM_GENERAL_READ', 'system.general.config', [], True, False, False),
-    ('READONLY_ADMIN', 'system.general.update', [{}], False, False, False),
-    ('SYSTEM_GENERAL_WRITE', 'system.general.update', [{}], True, False, False),
-    ('SYSTEM_ADVANCED_READ', 'system.advanced.config', [], True, False, False),
-    ('READONLY_ADMIN', 'system.advanced.update', [{}], False, False, False),
-    ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update', [{}], True, False, False),
-    ('SYSTEM_ADVANCED_READ', 'system.advanced.sed_global_password', [], True, False, False),
-    ('READONLY_ADMIN', 'system.advanced.update_gpu_pci_ids', [None], False, True, False),
-    ('SYSTEM_ADVANCED_WRITE', 'system.advanced.update_gpu_pci_ids', [None], True, True, True),
-    ('SYSTEM_GENERAL_READ', 'system.general.local_url', [], True, False, False),
-])
-def test_system_settings_read_and_write_role(
-    unprivileged_user_fixture, role, endpoint, payload, should_work, valid_role_exception, is_return_type_none
-):
-    common_checks(
-        unprivileged_user_fixture, endpoint, role, should_work, is_return_type_none=is_return_type_none,
-        valid_role_exception=valid_role_exception, method_args=payload
-    )
diff --git a/tests/api2/test_system_vendor.py b/tests/api2/test_system_vendor.py
deleted file mode 100644
index 2d8c87759da11..0000000000000
--- a/tests/api2/test_system_vendor.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from middlewared.test.integration.utils import call, ssh
-
-
-SENTINEL_FILE_PATH = "/data/.vendor"
-
-
-def test_no_vendor_file():
-    file_exists = ssh(f"test -e {SENTINEL_FILE_PATH}", check=False, complete_response=True)["result"]
-    assert not file_exists
-    assert not call("system.vendor.is_vendored")
-
-
-def test_name_is_none():
-    vendor_name = call("system.vendor.name")
-    assert vendor_name is None
diff --git a/tests/api2/test_truecommand_roles.py b/tests/api2/test_truecommand_roles.py
deleted file mode 100644
index d04c873654d2f..0000000000000
--- a/tests/api2/test_truecommand_roles.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-def test_truecommand_readonly_role(unprivileged_user_fixture):
-    common_checks(
-        unprivileged_user_fixture, 'truenas.managed_by_truecommand', 'READONLY_ADMIN', True, valid_role_exception=False
-    )
-
-
-@pytest.mark.parametrize('endpoint,role,should_work,valid_role_exception', [
-    ('truecommand.config', 'TRUECOMMAND_READ', True, False),
-    ('truecommand.config', 'TRUECOMMAND_WRITE', True, False),
-    ('truecommand.info', 'TRUECOMMAND_READ', True, False),
-    ('truecommand.info', 'TRUECOMMAND_WRITE', True, False),
-    ('truecommand.update', 'TRUECOMMAND_READ', False, True),
-    ('truecommand.update', 'TRUECOMMAND_WRITE', True, True),
-])
-def test_truecommand_read_and_write_role(unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception):
-    common_checks(
-        unprivileged_user_fixture, endpoint, role, should_work, valid_role_exception=valid_role_exception
-    )
diff --git a/tests/api2/test_truenas_verify.py b/tests/api2/test_truenas_verify.py
deleted file mode 100644
index 12d1bcdcc0637..0000000000000
--- a/tests/api2/test_truenas_verify.py
+++ /dev/null
@@ -1,9 +0,0 @@
-from middlewared.test.integration.utils import ssh
-
-
-def test_truenas_verify():
-    response = ssh('truenas_verify', check=False, complete_response=True)
-
-    # Jenkins VMs alter the system files for setup, so truenas_verify should generate errors.
-    assert not response['result']
-    assert ssh('head /var/log/truenas_verify.log'), 'Test environment should log file verification errors.'
diff --git a/tests/api2/test_tunables.py b/tests/api2/test_tunables.py
deleted file mode 100644
index 2b57348139a0c..0000000000000
--- a/tests/api2/test_tunables.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import pytest
-
-from truenas_api_client import ValidationErrors
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.mock_binary import mock_binary
-
-SYSCTL = "kernel.watchdog"
-SYSCTL_DEFAULT_VALUE = "1"
-SYSCTL_NEW_VALUE = "0"
-
-ZFS = "zil_nocacheflush"
-ZFS_DEFAULT_VALUE = "0"
-ZFS_NEW_VALUE = "1"
-
-
-def test_create_invalid_sysctl():
-    with pytest.raises(ValidationErrors) as ve:
-        call("tunable.create", {
-            "type": "SYSCTL",
-            "var": "kernel.truenas",
-            "value": "1",
-        }, job=True)
-
-    assert ve.value.errors[0].attribute == "tunable_create.var"
-
-
-def test_create_invalid_udev():
-    with pytest.raises(ValidationErrors) as ve:
-        call("tunable.create", {
-            "type": "UDEV",
-            "var": "61-truenas-pmem",
-            "value": "# disable built-in truenas rule to enable memory loss",
-        }, job=True)
-
-    assert ve.value.errors[0].attribute == "tunable_create.var"
-
-
-def test_create_invalid_zfs():
-    with pytest.raises(ValidationErrors) as ve:
-        call("tunable.create", {
-            "type": "ZFS",
-            "var": "zfs_truenas",
-            "value": "1",
-        }, job=True)
-
-    assert ve.value.errors[0].attribute == "tunable_create.var"
-
-
-def test_sysctl_lifecycle():
-    def assert_default_value():
-        assert ssh("cat /etc/sysctl.d/tunables.conf", check=False) == f""
-        assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_DEFAULT_VALUE}\n"
-
-    def assert_new_value():
-        assert ssh("cat /etc/sysctl.d/tunables.conf") == f"{SYSCTL}={SYSCTL_NEW_VALUE}\n"
-        assert ssh(f"sysctl -n {SYSCTL}") == f"{SYSCTL_NEW_VALUE}\n"
-
-    assert_default_value()
-
-    tunable = call("tunable.create", {
-        "type": "SYSCTL",
-        "var": SYSCTL,
-        "value": SYSCTL_NEW_VALUE,
-    }, job=True)
-
-    assert_new_value()
-
-    call("tunable.update", tunable["id"], {
-        "enabled": False,
-    }, job=True)
-
-    assert_default_value()
-
-    call("tunable.update", tunable["id"], {
-        "enabled": True,
-    }, job=True)
-
-    assert_new_value()
-
-    call("tunable.delete", tunable["id"], job=True)
-
-    assert_default_value()
-
-
-def test_udev_lifecycle():
-    def assert_exists():
-        assert ssh("cat /etc/udev/rules.d/10-disable-usb.rules") == f"BUS==\"usb\", OPTIONS+=\"ignore_device\"\n"
-
-    def assert_does_not_exist():
-        assert ssh("cat /etc/udev/rules.d/10-disable-usb.rules", check=False) == f""
-
-    tunable = call("tunable.create", {
-        "type": "UDEV",
-        "var": "10-disable-usb",
-        "value": "BUS==\"usb\", OPTIONS+=\"ignore_device\""
-    }, job=True)
-
-    assert_exists()
-
-    call("tunable.update", tunable["id"], {
-        "enabled": False,
-    }, job=True)
-
-    assert_does_not_exist()
-
-    call("tunable.update", tunable["id"], {
-        "enabled": True,
-    }, job=True)
-
-    assert_exists()
-
-    call("tunable.delete", tunable["id"], job=True)
-
-    assert_does_not_exist()
-
-
-def test_zfs_lifecycle():
-    with mock_binary("/usr/sbin/update-initramfs", exitcode=0):
-        def assert_default_value():
-            assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == f""
-            assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_DEFAULT_VALUE}\n"
-
-        def assert_new_value():
-            assert ssh("cat /etc/modprobe.d/zfs.conf", check=False) == f"options zfs {ZFS}={ZFS_NEW_VALUE}\n"
-            assert ssh(f"cat /sys/module/zfs/parameters/{ZFS}") == f"{ZFS_NEW_VALUE}\n"
-
-        assert_default_value()
-
-        tunable = call("tunable.create", {
-            "type": "ZFS",
-            "var": ZFS,
-            "value": ZFS_NEW_VALUE,
-        }, job=True)
-
-        assert_new_value()
-
-        call("tunable.update", tunable["id"], {
-            "enabled": False,
-        }, job=True)
-
-        assert_default_value()
-
-        call("tunable.update", tunable["id"], {
-            "enabled": True,
-        }, job=True)
-
-        assert_new_value()
-
-        call("tunable.delete", tunable["id"], job=True)
-
-        assert_default_value()
-
-
-def test_arc_max_set():
-    tunable = call("tunable.create", {"type": "ZFS", "var": "zfs_arc_max", "value": 8675309}, job=True)
-    try:
-        val = ssh("cat /sys/module/zfs/parameters/zfs_arc_max")
-    finally:
-        call("tunable.delete", tunable["id"], job=True)
-
-    assert int(val.strip()) == 8675309
-
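-    # Applying the tunable must not leave the root filesystem remounted read-write.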
-    mount_info = call("filesystem.mount_info", [["mountpoint", "=", "/"]], {"get": True})
-    assert "RO" in mount_info["super_opts"]
diff --git a/tests/api2/test_twofactor_auth.py b/tests/api2/test_twofactor_auth.py
deleted file mode 100644
index 7a4773bdefe62..0000000000000
--- a/tests/api2/test_twofactor_auth.py
+++ /dev/null
@@ -1,282 +0,0 @@
-import contextlib
-from datetime import datetime, timezone
-import errno
-
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import user as user_create
-from middlewared.test.integration.assets.two_factor_auth import enabled_twofactor_auth, get_user_secret, get_2fa_totp_token
-from middlewared.test.integration.assets.account import unprivileged_user
-from middlewared.test.integration.utils import call, client
-
-
-TEST_USERNAME = 'test2fauser'
-TEST_USERNAME_2 = 'test2fauser2'
-TEST_PASSWORD = 'testpassword'
-TEST_PASSWORD_2 = 'testpassword2'
-TEST_GID = 544
-TEST_TWOFACTOR_INTERVAL = {'interval': 60}
-USERS_2FA_CONF = {
-    TEST_USERNAME: {'interval': 30, 'otp_digits': 6},
-    TEST_USERNAME_2: {'interval': 40, 'otp_digits': 7}
-}
-
-
-@contextlib.contextmanager
-def user(data: dict):
-    data['group'] = call('group.query', [['gid', '=', TEST_GID]], {'get': True})['id']
-    with user_create(data) as user_obj:
-        yield user_obj
-
-
-@pytest.fixture(scope='function')
-def clear_ratelimit():
-    call('rate.limit.cache_clear')
-
-
-@pytest.fixture(scope='module', autouse=True)
-def ensure_small_time_difference():
-    nas_time = call('system.info')['datetime']
-    local_time = datetime.now(timezone.utc)
-    if abs((nas_time - local_time).total_seconds()) > 5:
-        raise Exception(f'Time difference between NAS ({nas_time!r}) and test client ({local_time}) is too large')
-
-
-def do_login(username, password, otp=None, expected=True):
-    with client(auth=None) as c:
-        resp = c.call('auth.login_ex', {
-            'mechanism': 'PASSWORD_PLAIN',
-            'username': username,
-            'password': password,
-        })
-        if not otp and expected:
-            assert resp['response_type'] == 'SUCCESS'
-        elif not otp and not expected:
-            assert resp['response_type'] in ('AUTH_ERR', 'OTP_REQUIRED')
-        else:
-            assert resp['response_type'] == 'OTP_REQUIRED'
-
-        if not otp:
-            return
-
-        resp = c.call('auth.login_ex_continue', {
-            'mechanism': 'OTP_TOKEN',
-            'otp_token': otp
-        })
-        if expected:
-            assert resp['response_type'] == 'SUCCESS'
-        else:
-            assert resp['response_type'] == 'OTP_REQUIRED'
-
-
-def test_login_without_2fa(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME,
-        'password': TEST_PASSWORD,
-        'full_name': TEST_USERNAME,
-    }):
-        do_login(TEST_USERNAME, TEST_PASSWORD)
-
-
-@pytest.mark.parametrize("user_name,password,renew_options", [
-    ('test_user1', 'test_password1', {'interval': 30, 'otp_digits': 6}),
-    ('test_user2', 'test_password2', {'interval': 60, 'otp_digits': 7}),
-    ('test_user3', 'test_password3', {'interval': 50, 'otp_digits': 8}),
-])
-def test_secret_generation_for_user(user_name, password, renew_options, clear_ratelimit):
-    with user({
-        'username': user_name,
-        'password': password,
-        'full_name': user_name,
-    }) as user_obj:
-        assert get_user_secret(user_obj['id'], False) != []
-        assert get_user_secret(user_obj['id'])['secret'] is None
-
-        call('user.renew_2fa_secret', user_obj['username'], renew_options)
-
-        user_secret_obj = get_user_secret(user_obj['id'])
-        assert user_secret_obj['secret'] is not None
-        for k in ('interval', 'otp_digits'):
-            assert user_secret_obj[k] == renew_options[k]
-
-
-def test_secret_generation_for_multiple_users(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME,
-        'password': TEST_PASSWORD,
-        'full_name': TEST_USERNAME,
-    }) as first_user:
-        call('user.renew_2fa_secret', first_user['username'], USERS_2FA_CONF[first_user['username']])
-        with user({
-            'username': TEST_USERNAME_2,
-            'password': TEST_PASSWORD_2,
-            'full_name': TEST_USERNAME_2,
-        }) as second_user:
-            call('user.renew_2fa_secret', second_user['username'], USERS_2FA_CONF[second_user['username']])
-            for user_obj in (first_user, second_user):
-                user_secret_obj = get_user_secret(user_obj['id'])
-                assert user_secret_obj['secret'] is not None
-                for k in ('interval', 'otp_digits'):
-                    assert user_secret_obj[k] == USERS_2FA_CONF[user_obj['username']][k]
-
-
-def test_login_without_otp_for_user_without_2fa(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME_2,
-        'password': TEST_PASSWORD_2,
-        'full_name': TEST_USERNAME_2,
-    }):
-        with enabled_twofactor_auth():
-            do_login(TEST_USERNAME_2, TEST_PASSWORD_2)
-
-
-def test_login_with_otp_for_user_with_2fa(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME_2,
-        'password': TEST_PASSWORD_2,
-        'full_name': TEST_USERNAME_2,
-    }) as user_obj:
-        with enabled_twofactor_auth():
-            call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL)
-            do_login(TEST_USERNAME_2, TEST_PASSWORD_2, get_2fa_totp_token(get_user_secret(user_obj['id'])))
-
-
-def test_user_2fa_secret_renewal(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME_2,
-        'password': TEST_PASSWORD_2,
-        'full_name': TEST_USERNAME_2,
-    }) as user_obj:
-        with enabled_twofactor_auth():
-            call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL)
-            do_login(TEST_USERNAME_2, TEST_PASSWORD_2, get_2fa_totp_token(get_user_secret(user_obj['id'])))
-            secret = get_user_secret(user_obj['id'])
-
-            call('user.renew_2fa_secret', user_obj['username'], TEST_TWOFACTOR_INTERVAL)
-            call('user.get_instance', user_obj['id'])
-            assert get_user_secret(user_obj['id'])['secret'] != secret['secret']
-            do_login(TEST_USERNAME_2, TEST_PASSWORD_2, get_2fa_totp_token(get_user_secret(user_obj['id'])))
-
-
-def test_restricted_user_2fa_secret_renewal(clear_ratelimit):
-    with unprivileged_user(
-        username=TEST_USERNAME,
-        group_name='TEST_2FA_GROUP',
-        privilege_name='TEST_2FA_PRIVILEGE',
-        allowlist=[],
-        web_shell=False,
-        roles=['READONLY_ADMIN']
-    ) as acct:
-        with enabled_twofactor_auth():
-            with client(auth=(acct.username, acct.password)) as c:
-                with pytest.raises(CallError) as ve:
-                    # Trying to renew another user's 2fa token should fail
-                    c.call('user.renew_2fa_secret', "root", TEST_TWOFACTOR_INTERVAL)
-
-                assert ve.value.errno == errno.EPERM
-
-                c.call('user.renew_2fa_secret', acct.username, TEST_TWOFACTOR_INTERVAL)
-                user_obj = call('user.query', [['username', '=', acct.username]], {'get': True})
-                do_login(acct.username, acct.password, get_2fa_totp_token(get_user_secret(user_obj['id'])))
-
-                secret = get_user_secret(user_obj['id'])
-
-                c.call('user.renew_2fa_secret', acct.username, TEST_TWOFACTOR_INTERVAL)
-                assert get_user_secret(user_obj['id'])['secret'] != secret['secret']
-
-                do_login(acct.username, acct.password, get_2fa_totp_token(get_user_secret(user_obj['id'])))
-
-
-def test_multiple_users_login_with_otp(clear_ratelimit):
-    with user({
-        'username': TEST_USERNAME,
-        'password': TEST_PASSWORD,
-        'full_name': TEST_USERNAME,
-    }) as first_user:
-        with enabled_twofactor_auth():
-            do_login(TEST_USERNAME, TEST_PASSWORD)
-
-            with user({
-                'username': TEST_USERNAME_2,
-                'password': TEST_PASSWORD_2,
-                'full_name': TEST_USERNAME_2,
-            }) as second_user:
-                call('user.renew_2fa_secret', second_user['username'], TEST_TWOFACTOR_INTERVAL)
-                otp_token = get_2fa_totp_token(get_user_secret(second_user['id']))
-                do_login(TEST_USERNAME_2, TEST_PASSWORD_2, otp_token)
-
-                # verify we can't replay same token
-                do_login(TEST_USERNAME_2, TEST_PASSWORD_2, otp_token)
-
-                # Verify 2FA still required
-                do_login(TEST_USERNAME_2, TEST_PASSWORD_2, expected=False)
-
-                call('user.renew_2fa_secret', first_user['username'], TEST_TWOFACTOR_INTERVAL)
-                do_login(TEST_USERNAME, TEST_PASSWORD, get_2fa_totp_token(get_user_secret(first_user['id'])))
-
-
-def test_login_with_otp_failure(clear_ratelimit):
-    """ simulate continually fat-fingering OTP token until eventual failure """
-    with user({
-        'username': TEST_USERNAME,
-        'password': TEST_PASSWORD,
-        'full_name': TEST_USERNAME,
-    }) as u:
-        with enabled_twofactor_auth():
-            call('user.renew_2fa_secret', u['username'], TEST_TWOFACTOR_INTERVAL)
-
-            with client(auth=None) as c:
-                resp = c.call('auth.login_ex', {
-                    'mechanism': 'PASSWORD_PLAIN',
-                    'username': TEST_USERNAME,
-                    'password': TEST_PASSWORD,
-                })
-                assert resp['response_type'] == 'OTP_REQUIRED'
-                retry_cnt = 0
-
-                while retry_cnt < 3:
-                    resp = c.call('auth.login_ex_continue', {
-                        'mechanism': 'OTP_TOKEN',
-                        'otp_token': 'canary'
-                    })
-                    assert resp['response_type'] == 'OTP_REQUIRED', retry_cnt
-                    retry_cnt += 1
-
-                # We've now exhausted any grace from server. Hammer is dropped.
-                resp = c.call('auth.login_ex_continue', {
-                    'mechanism': 'OTP_TOKEN',
-                    'otp_token': 'canary'
-                })
-                assert resp['response_type'] == 'AUTH_ERR'
-
-
-def test_login_with_otp_switch_account(clear_ratelimit):
-    """ Validate we can abandon a login attempt with 2FA """
-    with user({
-        'username': TEST_USERNAME,
-        'password': TEST_PASSWORD,
-        'full_name': TEST_USERNAME,
-    }) as u:
-        with user({
-            'username': TEST_USERNAME_2,
-            'password': TEST_PASSWORD_2,
-            'full_name': TEST_USERNAME_2,
-        }):
-            with enabled_twofactor_auth():
-                call('user.renew_2fa_secret', u['username'], TEST_TWOFACTOR_INTERVAL)
-
-                with client(auth=None) as c:
-                    resp = c.call('auth.login_ex', {
-                        'mechanism': 'PASSWORD_PLAIN',
-                        'username': TEST_USERNAME,
-                        'password': TEST_PASSWORD,
-                    })
-                    assert resp['response_type'] == 'OTP_REQUIRED'
-
-                    resp = c.call('auth.login_ex', {
-                        'mechanism': 'PASSWORD_PLAIN',
-                        'username': TEST_USERNAME_2,
-                        'password': TEST_PASSWORD_2,
-                    })
-                    assert resp['response_type'] == 'SUCCESS'
diff --git a/tests/api2/test_ui_caching.py b/tests/api2/test_ui_caching.py
deleted file mode 100644
index dcd2ab94d25bb..0000000000000
--- a/tests/api2/test_ui_caching.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import re
-
-import pytest
-import requests
-
-from middlewared.test.integration.utils import url
-
-RE_MAIN_SCRIPT = re.compile(r'<script src="(main[.-].+\.js)" type="module">')
-
-
-@pytest.mark.parametrize("path", ["/", "/ui", "/ui/", "/ui/index.html", "/ui/sessions/signin"])
-def test_index_html(path):
-    r = requests.get(url() + path, timeout=10)
-
-    assert r.status_code == 200
-
-    assert "Strict-Transport-Security" in r.headers
-
-    # FIXME: There is no easy way to fix this for index.html, but since this path never appears anywhere,
-    # we can probably ignore this for now
-    if path != "/ui/index.html":
-        assert r.headers["Cache-Control"] == "no-store, no-cache, must-revalidate, max-age=0"
-
-    assert RE_MAIN_SCRIPT.search(r.text)
-
-
-def test_assets():
-    r = requests.get(url(), timeout=10)
-
-    m = RE_MAIN_SCRIPT.search(r.text)
-    r = requests.get(url() + f"/ui/{m.group(1)}")
-
-    assert "Strict-Transport-Security" in r.headers
-
-    assert r.headers["Cache-Control"] == "must-revalidate"
diff --git a/tests/api2/test_usage_reporting.py b/tests/api2/test_usage_reporting.py
deleted file mode 100644
index 58ee196a9fbf2..0000000000000
--- a/tests/api2/test_usage_reporting.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import pytest
-from itertools import chain
-from middlewared.test.integration.assets.nfs import nfs_server
-from middlewared.test.integration.assets.ftp import ftp_server
-from middlewared.test.integration.assets.pool import dataset as nfs_dataset
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import truenas_server
-from protocols import ftp_connection, SSH_NFS, nfs_share
-
-from auto_config import password, pool_name, user
-
-
-class GatherTypes:
-    expected = {
-        'total_capacity': ['total_capacity'],
-        'backup_data': ['data_backup_stats', 'data_without_backup_size'],
-        'applications': ['apps', 'catalog_items', 'docker_images'],
-        'filesystem_usage': ['datasets', 'zvols'],
-        'ha_stats': ['ha_licensed'],
-        'directory_service_stats': ['directory_services'],
-        'cloud_services': ['cloud_services'],
-        'hardware': ['hardware'],
-        'network': ['network'],
-        'system_version': ['platform', 'version'],
-        'system': ['system_hash', 'usage_version', 'system'],
-        'pools': ['pools', 'total_raw_capacity'],
-        'services': ['services'],
-        'nfs': ['NFS'],
-        'ftp': ['FTP'],
-        'sharing': ['shares'],
-        'vms': ['vms'],
-        'nspawn_containers': ['nspawn_containers'],
-        # Add new gather type here
-    }
-
-
-@pytest.fixture(scope="module")
-def get_usage_sample():
-    sample = call('usage.gather')
-    yield sample
-
-
-def test_gather_types(get_usage_sample):
-    """ Confirm we find the expected types. Fail if this test needs updating """
-    sample = get_usage_sample
-    expected = list(chain.from_iterable(GatherTypes.expected.values()))
-
-    # If there is a mismatch it probably means this test module needs to be updated
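-    # (symmetric_difference flags drift in either direction, e.g. {'a', 'b'} ^ {'b', 'c'}
-    # == {'a', 'c'}, so both missing and stale entries are caught)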
-    assert set(expected).symmetric_difference(sample) == set(), "Expected empty set. "\
-        f"Missing an entry in the output ({len(sample)} entries) or test needs updating ({len(expected)} entries)"
-
-
-def test_nfs_reporting(get_usage_sample):
-    """ Confirm we are correctly reporting the number of connections """
-    # Initial state should have NFSv[3,4] and no connections
-    assert set(get_usage_sample['NFS']['enabled_protocols']) == set(["NFSV3", "NFSV4"])
-    assert get_usage_sample['NFS']['num_clients'] == 0
-
-    # Establish a connection
-    nfs_path = f'/mnt/{pool_name}/test_nfs'
-    with nfs_dataset("test_nfs"):
-        with nfs_share(nfs_path):
-            with nfs_server():
-                with SSH_NFS(truenas_server.ip, nfs_path,
-                             user=user, password=password, ip=truenas_server.ip):
-                    usage_sample = call('usage.gather')
-                    assert usage_sample['NFS']['num_clients'] == 1
-
-
-def test_ftp_reporting(get_usage_sample):
-    """ Confirm we are correctly reporting the number of connections """
-    # Initial state should have no connections
-    assert get_usage_sample['FTP']['num_connections'] == 0
-
-    # Establish two connections
-    with ftp_server():
-        with ftp_connection(truenas_server.ip):
-            with ftp_connection(truenas_server.ip):
-                usage_sample = call('usage.gather')
-                assert usage_sample['FTP']['num_connections'] == 2
-
-
-# Possible TODO:  Add validation of the entries
diff --git a/tests/api2/test_user_create_dir.py b/tests/api2/test_user_create_dir.py
deleted file mode 100644
index 67dddbff3e388..0000000000000
--- a/tests/api2/test_user_create_dir.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import errno
-import os
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import user
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call
-
-DS_NAME = 'user-create-homedir'
-
-
-@pytest.fixture(scope='function')
-def setup_user():
-    with dataset(DS_NAME, data={'share_type': 'SMB'}) as ds:
-        with user({
-            'username': 'usercreate',
-            'full_name': 'usercreate',
-            'group_create': True,
-            'home': os.path.join('/mnt', ds),
-            'home_create': False,
-            'password': 'ABCD1234'
-        }) as u:
-            yield u | {'dataset': ds}
-
-
-def test_create_homedir(setup_user):
-    """ This test validates we can set create a new homedir within the currently set homedir """
-
-    call('user.update', setup_user['id'], {
-        'home': setup_user['home'],
-        'home_create': True
-    })
-
-    new = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
-    assert new['home'] == os.path.join(setup_user['home'], setup_user['username'])
-
-    # verify that we won't endlessly create new homedirs within the existing one if a
-    # user is not very API / design savvy
-    call('user.update', setup_user['id'], {
-        'home': setup_user['home'],
-        'home_create': True
-    })
-
-    new2 = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
-    assert new2['home'] == new['home']
-
-
-def test_user_change_homedir_no_traverse(setup_user):
-    """ we should not recurse into child datasets """
-    with dataset(f'{DS_NAME}/subds') as subds:
-
-        # Verify that new dataset exists in source
-        call('filesystem.listdir', setup_user['home'], [['name', '=', 'subds']], {'get': True})
-
-        with dataset('new-path', data={'share_type': 'SMB'}) as ds:
-            call('user.update', setup_user['id'], {
-                'home': os.path.join('/mnt', ds),
-                'home_create': True
-            })
-
-            new = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
-
-            # Verify that we did not try to copy over the dataset
-            with pytest.raises(CallError) as ce:
-                call('filesystem.stat', os.path.join(new['home'], 'subds'))
-
-            assert ce.value.errno == errno.ENOENT
-
-
-def test_user_change_homedir_no_zfs_ctldir(setup_user):
-    """ we should not recurse into / try to copy .zfs if snapdir visible """
-    call('pool.dataset.update', setup_user['dataset'], {'snapdir': 'VISIBLE'})
-
-    call('user.update', setup_user['id'], {
-        'home': setup_user['home'],
-        'home_create': True
-    })
-
-    new = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
-    assert new['home'] == os.path.join(setup_user['home'], setup_user['username'])
-
-    with pytest.raises(CallError) as ce:
-        call('filesystem.stat', os.path.join(new['home'], '.zfs'))
-
-    assert ce.value.errno == errno.ENOENT
-
-
-def test_user_change_homedir_acl_preserve(setup_user):
-    """ If for some reason files within homedir have ACL, it should be preserved on copy """
-    ACL = [{
-        'tag': 'owner@',
-        'id': -1,
-        'perms': {'BASIC': 'FULL_CONTROL'},
-        'flags': {'BASIC': 'INHERIT'},
-        'type': 'ALLOW',
-        'who': None,
-    }]
-    call('filesystem.mkdir', {'path': os.path.join(setup_user['home'], 'canary')})
-
-    call('filesystem.setacl', {
-        'path': os.path.join(setup_user['home'], 'canary'),
-        'dacl': ACL
-    }, job=True)
-
-    call('user.update', setup_user['id'], {
-        'home': setup_user['home'],
-        'home_create': True
-    })
-
-    new = call('user.query', [['id', '=', setup_user['id']]], {'get': True})
-
-    acl = call('filesystem.getacl', os.path.join(new['home'], 'canary'))['acl']
-
-    assert acl == ACL
diff --git a/tests/api2/test_user_ssh_password.py b/tests/api2/test_user_ssh_password.py
deleted file mode 100644
index 5e3caa7d81c60..0000000000000
--- a/tests/api2/test_user_ssh_password.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.account import user, group
-from middlewared.test.integration.utils import call, ssh
-
-
-@pytest.mark.parametrize("ssh_password_enabled", [True, False])
-def test_user_ssh_password_enabled(ssh_password_enabled):
-    with user({
-        "username": "test",
-        "full_name": "Test",
-        "group_create": True,
-        "home": f"/nonexistent",
-        "password": "test1234",
-        "ssh_password_enabled": ssh_password_enabled,
-    }):
-        result = ssh("whoami", check=False, complete_response=True, user="test",
-                     password="test1234")
-        if ssh_password_enabled:
-            assert "test" in result["output"]
-        else:
-            assert "Permission denied" in result["stderr"]
-
-
-@pytest.fixture(scope="module")
-def group1_with_user():
-    with group({"name": "group1"}) as g1:
-        with user({
-            "username": "test",
-            "full_name": "Test",
-            "group_create": True,
-            "groups": [g1["id"]],
-            "home": f"/nonexistent",
-            "password": "test1234",
-        }):
-            yield
-
-
-@pytest.mark.parametrize("ssh_password_enabled", [True, False])
-def test_group_ssh_password_enabled(group1_with_user, ssh_password_enabled):
-    call("ssh.update", {"password_login_groups": ["group1"] if ssh_password_enabled else []})
-
-    result = ssh("whoami", check=False, complete_response=True, user="test",
-                 password="test1234")
-    if ssh_password_enabled:
-        assert "test" in result["output"]
-    else:
-        assert "Permission denied" in result["stderr"]
diff --git a/tests/api2/test_user_truenas_admin.py b/tests/api2/test_user_truenas_admin.py
deleted file mode 100644
index 7fc738b41aecc..0000000000000
--- a/tests/api2/test_user_truenas_admin.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import io
-import os
-import subprocess
-import tarfile
-import tempfile
-
-import pytest
-import requests
-
-from middlewared.test.integration.assets.account import root_with_password_disabled
-from middlewared.test.integration.assets.keychain import ssh_keypair
-from middlewared.test.integration.utils import call, client, host, mock, url
-
-
-@pytest.fixture(scope="module")
-def truenas_admin():
-    assert call("user.query", [["uid", "=", 950]]) == []
-    assert call("user.query", [["username", "=", "truenas_admin"]]) == []
-
-    with root_with_password_disabled() as context:
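-        # Blank out root's password hash to approximate a fresh-install state in which
-        # setting up a local administrator account is still permitted.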
-        context.client.call("datastore.update", "account.bsdusers", context.root_id, {"bsdusr_unixhash": "*"})
-        context.client.call("user.setup_local_administrator", "truenas_admin", "password")
-        call("system.info", client_kwargs=dict(auth=("truenas_admin", "password")))
-        # Quickly restore root password before anyone notices
-        context.client.call("datastore.update", "account.bsdusers", context.root_id, context.root_backup)
-        context.client.call("etc.generate", "user")
-
-        truenas_admin = call("user.query", [["username", "=", "truenas_admin"]], {"get": True})
-        try:
-            yield truenas_admin
-        finally:
-            call("datastore.delete", "account.bsdusers", truenas_admin["id"])
-            call("etc.generate", "user")
-
-
-def test_installer_admin_has_local_administrator_privilege(truenas_admin):
-    with client(auth=("truenas_admin", "password")) as c:
-        c.call("system.info")
-
-
-def test_can_set_admin_authorized_key(truenas_admin):
-    with ssh_keypair() as keypair:
-        call("user.update", truenas_admin["id"], {
-            "sshpubkey": keypair["attributes"]["public_key"],
-        })
-        try:
-            with tempfile.NamedTemporaryFile("w") as f:
-                os.chmod(f.name, 0o600)
-                f.write(keypair["attributes"]["private_key"])
-                f.flush()
-
-                subprocess.run([
-                    "ssh",
-                    "-i", f.name,
-                    "-o", "StrictHostKeyChecking=no",
-                    "-o", "UserKnownHostsFile=/dev/null",
-                    "-o", "VerifyHostKeyDNS=no",
-                    f"truenas_admin@{host().ip}",
-                    "uptime",
-                ], capture_output=True, check=True, timeout=30)
-
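-                # Download a config backup and confirm the admin's public key is included
-                # with the expected truenas_admin (950:950) ownership.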
-                job_id, path = call("core.download", "config.save", [{"root_authorized_keys": True}], "config.tar")
-                r = requests.get(f"{url()}{path}")
-                r.raise_for_status()
-                tar_io = io.BytesIO(r.content)
-                with tarfile.TarFile(fileobj=tar_io) as tar:
-                    member = tar.getmember("truenas_admin_authorized_keys")
-                    assert member.uid == 950
-                    assert member.gid == 950
-                    assert member.uname == "truenas_admin"
-                    assert member.gname == "truenas_admin"
-                    assert tar.extractfile(member).read().decode() == keypair["attributes"]["public_key"]
-        finally:
-            call("user.update", truenas_admin["id"], {
-                "sshpubkey": "",
-            })
-
-
-def test_admin_user_alert(truenas_admin):
-    with mock("user.get_user_obj", args=[{"uid": 950}], return_value={
-        "pw_name": "root", "pw_uid": 0, "pw_gid": 0, "pw_gecos": "root", "pw_dir": "/root", "pw_shell": "/usr/bin/zsh"
-    }):
-        alerts = call("alert.run_source", "AdminUser")
-        assert len(alerts) == 1
-        assert alerts[0]["klass"] == "AdminUserIsOverridden"
-
-
-def test_admin_user_no_alert(truenas_admin):
-    assert not call("alert.run_source", "AdminUser")
diff --git a/tests/api2/test_virt_001_global.py b/tests/api2/test_virt_001_global.py
deleted file mode 100644
index 615d7424299c7..0000000000000
--- a/tests/api2/test_virt_001_global.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from middlewared.test.integration.utils.call import call
-from middlewared.test.integration.utils.ssh import ssh
-
-
-from auto_config import pool_name
-
-
-def test_virt_pool():
-    call('virt.global.update', {'pool': pool_name}, job=True)
-    ssh(f'zfs list {pool_name}/.ix-virt')
-
-
-def test_virt_no_pool():
-    call('virt.global.update', {'pool': None}, job=True)
-    ssh('incus storage show default 2>&1 | grep "incus daemon doesn\'t appear to be started"')
-
-
-def test_virt_pool_auto_bridge():
-    call('virt.global.update', {'pool': pool_name, 'bridge': None}, job=True)
-    ssh('ifconfig incusbr0')
diff --git a/tests/api2/test_virt_002_instance.py b/tests/api2/test_virt_002_instance.py
deleted file mode 100644
index fbb17f841cd13..0000000000000
--- a/tests/api2/test_virt_002_instance.py
+++ /dev/null
@@ -1,222 +0,0 @@
-from threading import Event
-
-from middlewared.test.integration.assets.filesystem import mkfile
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils.client import client
-from middlewared.test.integration.utils.call import call
-from middlewared.test.integration.utils.ssh import ssh
-
-from auto_config import pool_name
-
-INS1_NAME = 'debian'
-INS1_OS = 'Debian'
-INS1_IMAGE = 'debian/trixie'
-
-INS2_NAME = 'void'
-INS2_OS = 'Void Linux'
-INS2_IMAGE = 'voidlinux/musl'
-
-INS3_NAME = 'ubuntu'
-INS3_OS = 'Ubuntu'
-INS3_IMAGE = 'ubuntu/oracular/default'
-
-
-def clean():
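-    # Reset virt state: detach the storage pool, destroy any stale .ix-virt dataset,
-    # then re-attach so the test run starts from a clean slate.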
-    call('virt.global.update', {'pool': None}, job=True)
-    ssh(f'zfs destroy -r {pool_name}/.ix-virt || true')
-    call('virt.global.update', {'pool': pool_name}, job=True)
-
-
-def test_virt_instance_create():
-    clean()
-
-    wait_agent = Event()
-
-    def wait_debian(*args, **kwargs):
-        wait_agent.set()
-
-    with client() as c:
-        c.subscribe('virt.instance.agent_running', wait_debian, sync=True)
-
-        # Create first so there is time for the agent to start
-        call('virt.instance.create', {'name': INS1_NAME, 'image': INS1_IMAGE, 'instance_type': 'VM'}, job=True)
-
-        call('virt.instance.create', {'name': INS2_NAME, 'image': INS2_IMAGE}, job=True)
-        ssh(f'incus exec {INS2_NAME} cat /etc/os-release | grep "{INS2_OS}"')
-
-        call('virt.instance.create', {
-            'name': INS3_NAME,
-            'image': INS3_IMAGE,
-            'devices': [
-                {'dev_type': 'TPM', 'path': '/dev/tpm0', 'pathrm': '/dev/tmprm0'},
-                {'dev_type': 'PROXY', 'source_proto': 'TCP', 'source_port': 60123, 'dest_proto': 'TCP', 'dest_port': 2000},
-            ],
-        }, job=True)
-        ssh(f'incus exec {INS3_NAME} cat /etc/os-release | grep "{INS3_OS}"')
-
-        devices = call('virt.instance.device_list', INS3_NAME)
-        assert any(i for i in devices if i['name'] == 'tpm0'), devices
-        assert any(i for i in devices if i['name'] == 'proxy0'), devices
-
-        assert wait_agent.wait(timeout=60)
-        ssh(f'incus exec {INS1_NAME} cat /etc/os-release | grep "{INS1_OS}"')
-
-
-def test_virt_instance_update():
-    call('virt.instance.update', INS2_NAME, {'cpu': '1', 'memory': 500 * 1024 * 1024, 'environment': {'FOO': 'BAR'}}, job=True)
-    ssh(f'incus exec {INS2_NAME} grep MemTotal: /proc/meminfo|grep 512000')
-    # Checking CPUs seems to cause a race condition (perhaps the CPU is currently in use in the container?)
-    # rv = ssh('incus exec void cat /proc/cpuinfo |grep processor|wc -l')
-    # assert rv.strip() == '1'
-    rv = ssh(f'incus exec {INS2_NAME} env | grep ^FOO=')
-    assert rv.strip() == 'FOO=BAR'
-
-    call('virt.instance.update', INS2_NAME, {'cpu': None, 'memory': None, 'environment': {}}, job=True)
-
-    rv = ssh(f'incus exec {INS2_NAME} env | grep ^FOO= || true')
-    assert rv.strip() == ''
-
-
-def test_virt_instance_stop():
-    wait_status_event = Event()
-
-    def wait_status(event_type, **kwargs):
-        if kwargs['collection'] == 'virt.instance.query' and kwargs['id'] == INS2_NAME:
-            fields = kwargs.get('fields')
-            if fields and fields.get('status') == 'STOPPED':
-                wait_status_event.set()
-
-    with client() as c:
-        c.subscribe('virt.instance.query', wait_status, sync=True)
-
-        # Stop only one of them so the others are stopped during delete
-        assert ssh(f'incus list {INS2_NAME} -f json| jq ".[].status"').strip() == '"Running"'
-        instance = c.call('virt.instance.query', [['id', '=', INS2_NAME]], {'get': True})
-        assert instance['status'] == 'RUNNING'
-        call('virt.instance.stop', INS2_NAME, {'force': True}, job=True)
-        instance = c.call('virt.instance.query', [['id', '=', INS2_NAME]], {'get': True})
-        assert instance['status'] == 'STOPPED'
-        assert wait_status_event.wait(timeout=1)
-        assert ssh(f'incus list {INS2_NAME} -f json| jq ".[].status"').strip() == '"Stopped"'
-
-
-def test_virt_instance_restart():
-    # The instance should be running both before and after the restart
-    assert ssh(f'incus list {INS3_NAME} -f json| jq ".[].status"').strip() == '"Running"'
-    instance = call('virt.instance.query', [['id', '=', INS3_NAME]], {'get': True})
-    assert instance['status'] == 'RUNNING'
-    call('virt.instance.restart', INS3_NAME, {'force': True}, job=True)
-    instance = call('virt.instance.query', [['id', '=', INS3_NAME]], {'get': True})
-    assert instance['status'] == 'RUNNING'
-    assert ssh(f'incus list {INS3_NAME} -f json| jq ".[].status"').strip() == '"Running"'
-
-
-def test_virt_instance_device_add():
-    assert ssh(f'incus list {INS1_NAME} -f json| jq ".[].status"').strip() == '"Running"'
-    call('virt.instance.stop', INS1_NAME, {'force': True}, job=True)
-
-    assert call('virt.instance.device_add', INS1_NAME, {
-        'name': 'tpm',
-        'dev_type': 'TPM',
-    }) is True
-
-    assert call('virt.instance.device_add', INS3_NAME, {
-        'name': 'proxy',
-        'dev_type': 'PROXY',
-        'source_proto': 'TCP',
-        'source_port': 8005,
-        'dest_proto': 'TCP',
-        'dest_port': 80,
-    }) is True
-
-    # TODO: adding to a VM causes start to hang at the moment (zombie process)
-    # call('virt.instance.device_add', 'debian', {
-    #     'name': 'disk1',
-    #     'dev_type': 'DISK',
-    #     'source': f'/mnt/{pool_name}',
-    #     'destination': '/host',
-    # })
-
-    devices = call('virt.instance.device_list', INS1_NAME)
-    assert any(i for i in devices if i['name'] == 'tpm'), devices
-    devices = call('virt.instance.device_list', INS3_NAME)
-    assert any(i for i in devices if i['name'] == 'proxy'), devices
-    # assert 'disk1' in devices, devices
-
-    wait_agent = Event()
-
-    def wait_debian(*args, **kwargs):
-        wait_agent.set()
-
-    with client() as c:
-        c.subscribe('virt.instance.agent_running', wait_debian, sync=True)
-        call('virt.instance.start', INS1_NAME, job=True)
-        assert wait_agent.wait(timeout=30)
-
-    ssh('incus exec debian ls /dev/tpm0')
-    # ssh('incus exec debian ls /host')
-
-    with dataset('virtshare') as ds:
-        call('virt.instance.device_add', INS3_NAME, {
-            'name': 'disk1',
-            'dev_type': 'DISK',
-            'source': f'/mnt/{ds}',
-            'destination': '/host',
-        })
-        devices = call('virt.instance.device_list', INS3_NAME)
-        assert any(i for i in devices if i['name'] == 'disk1'), devices
-        with mkfile(f'/mnt/{ds}/testfile'):
-            ssh(f'incus exec {INS3_NAME} ls /host/testfile')
-        assert call('virt.instance.device_delete', INS3_NAME, 'disk1') is True
-
-    with dataset('virtshare', {'type': 'VOLUME', 'volsize': 200 * 1024 * 1024, 'sparse': True}) as ds:
-        ssh(f'mkfs.ext3 /dev/zvol/{ds}')
-        call('virt.instance.device_add', INS3_NAME, {
-            'name': 'disk2',
-            'dev_type': 'DISK',
-            'source': f'/dev/zvol/{ds}',
-            'destination': '/zvol',
-        })
-        devices = call('virt.instance.device_list', INS3_NAME)
-        assert any(i for i in devices if i['name'] == 'disk2'), devices
-        ssh(f'incus exec {INS3_NAME} mount|grep "on /zvol"|grep ext3')
-        assert call('virt.instance.device_delete', INS3_NAME, 'disk2') is True
-
-
-def test_virt_instance_device_update():
-    assert call('virt.instance.device_update', INS3_NAME, {
-        'name': 'proxy',
-        'dev_type': 'PROXY',
-        'source_proto': 'TCP',
-        'source_port': 8005,
-        'dest_proto': 'TCP',
-        'dest_port': 81,
-    }) is True
-
-
-def test_virt_instance_proxy():
-    ssh(f'incus exec -T {INS3_NAME} -- bash -c "nohup nc -l 0.0.0.0 81 > /tmp/nc 2>&1 &"')
-    ssh('echo "foo" | nc -w 1 localhost 8005 || true')
-    rv = ssh(f'incus exec {INS3_NAME} -- cat /tmp/nc')
-
-    assert rv.strip() == 'foo'
-
-
-def test_virt_instance_device_delete():
-    call('virt.instance.stop', INS1_NAME, {'force': True}, job=True)
-    assert call('virt.instance.device_delete', INS1_NAME, 'tpm') is True
-    devices = call('virt.instance.device_list', INS1_NAME)
-    assert not any(i for i in devices if i['name'] == 'tpm'), devices
-
-
-def test_virt_instance_delete():
-    call('virt.instance.delete', INS2_NAME, job=True)
-    ssh(f'incus config show {INS2_NAME} 2>&1 | grep "not found"')
-
-    call('virt.instance.delete', INS3_NAME, job=True)
-    ssh(f'incus config show {INS3_NAME} 2>&1 | grep "not found"')
-
-    call('virt.instance.delete', INS1_NAME, job=True)
-    ssh(f'incus config show {INS1_NAME} 2>&1 | grep "not found"')
-
-    assert len(call('virt.instance.query')) == 0
diff --git a/tests/api2/test_vm_roles.py b/tests/api2/test_vm_roles.py
deleted file mode 100644
index eece06abc5f30..0000000000000
--- a/tests/api2/test_vm_roles.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import pytest
-
-from middlewared.test.integration.assets.roles import common_checks
-
-
-@pytest.mark.parametrize('method, expected_error', [
-    ('vm.virtualization_details', False),
-    ('vm.maximum_supported_vcpus', False),
-    ('vm.get_display_devices', True),
-    ('vm.get_display_web_uri', True),
-    ('vm.get_available_memory', False),
-    ('vm.bootloader_options', False),
-])
-def test_vm_readonly_role(unprivileged_user_fixture, method, expected_error):
-    common_checks(unprivileged_user_fixture, method, 'READONLY_ADMIN', True, valid_role_exception=expected_error)
-
-
-@pytest.mark.parametrize('role, method, valid_role', [
-    ('VM_READ', 'vm.supports_virtualization', True),
-    ('VM_WRITE', 'vm.supports_virtualization', True),
-    ('VM_READ', 'vm.virtualization_details', True),
-    ('VM_WRITE', 'vm.virtualization_details', True),
-    ('VM_READ', 'vm.maximum_supported_vcpus', True),
-    ('VM_WRITE', 'vm.maximum_supported_vcpus', True),
-    ('VM_READ', 'vm.flags', True),
-    ('VM_WRITE', 'vm.flags', True),
-    ('VM_READ', 'vm.cpu_model_choices', True),
-    ('VM_WRITE', 'vm.cpu_model_choices', True),
-    ('VM_READ', 'vm.port_wizard', True),
-    ('VM_READ', 'vm.bootloader_options', True),
-])
-def test_vm_read_write_roles(unprivileged_user_fixture, role, method, valid_role):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=False)
-
-
-@pytest.mark.parametrize('role, method, valid_role', [
-    ('VM_WRITE', 'vm.clone', True),
-    ('VM_READ', 'vm.get_memory_usage', True),
-    ('VM_WRITE', 'vm.get_memory_usage', True),
-    ('VM_READ', 'vm.start', False),
-    ('VM_WRITE', 'vm.start', True),
-    ('VM_READ', 'vm.stop', False),
-    ('VM_WRITE', 'vm.stop', True),
-    ('VM_READ', 'vm.restart', False),
-    ('VM_WRITE', 'vm.restart', True),
-    ('VM_READ', 'vm.suspend', False),
-    ('VM_WRITE', 'vm.suspend', True),
-    ('VM_READ', 'vm.resume', False),
-    ('VM_WRITE', 'vm.resume', True),
-    ('VM_READ', 'vm.get_vm_memory_info', True),
-    ('VM_READ', 'vm.get_display_devices', True),
-    ('VM_READ', 'vm.status', True),
-    ('VM_READ', 'vm.log_file_path', True),
-])
-def test_vm_read_write_roles_requiring_virtualization(unprivileged_user_fixture, role, method, valid_role):
-    common_checks(unprivileged_user_fixture, method, role, valid_role)
-
-
-@pytest.mark.parametrize('role, method, valid_role', [
-    ('VM_DEVICE_READ', 'vm.device.iommu_enabled', True),
-    ('VM_DEVICE_READ', 'vm.device.passthrough_device_choices', True),
-    ('VM_DEVICE_READ', 'vm.device.nic_attach_choices', True),
-    ('VM_DEVICE_READ', 'vm.device.usb_passthrough_choices', True),
-    ('VM_READ', 'vm.guest_architecture_and_machine_choices', True),
-])
-def test_vm_device_read_write_roles(unprivileged_user_fixture, role, method, valid_role):
-    common_checks(unprivileged_user_fixture, method, role, valid_role, valid_role_exception=False)
-
-
-@pytest.mark.parametrize('role, method, valid_role', [
-    ('VM_DEVICE_READ', 'vm.device.passthrough_device', True),
-    ('VM_DEVICE_WRITE', 'vm.device.passthrough_device', True),
-])
-def test_vm_device_read_write_roles_requiring_virtualization(unprivileged_user_fixture, role, method, valid_role):
-    common_checks(unprivileged_user_fixture, method, role, valid_role)
diff --git a/tests/api2/test_vmware.py b/tests/api2/test_vmware.py
deleted file mode 100644
index 73328abbe1460..0000000000000
--- a/tests/api2/test_vmware.py
+++ /dev/null
@@ -1,190 +0,0 @@
-import contextlib
-import ssl
-import time
-import types
-
-import pytest
-from pyVim import connect, task as VimTask
-from pyVmomi import vim
-
-from middlewared.test.integration.assets.nfs import nfs_share
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.assets.snapshot_task import snapshot_task
-from middlewared.test.integration.assets.vmware import vmware
-from middlewared.test.integration.utils import call, ssh
-from middlewared.test.integration.utils.client import truenas_server
-from middlewared.test.integration.utils.string import random_string
-
-import os
-import sys
-apifolder = os.getcwd()
-sys.path.append(apifolder)
-
-try:
-    from config import (
-        VCENTER_HOSTNAME,
-        VCENTER_USERNAME,
-        VCENTER_PASSWORD,
-        VCENTER_DATACENTER,
-        VCENTER_ESX_HOST,
-    )
-except ImportError:
-    pytestmark = pytest.mark.skip(reason='vCenter credentials are missing in config.py')
-
-
-@contextlib.contextmanager
-def vcenter_connection():
-    ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
-    ssl_context.verify_mode = ssl.CERT_NONE
-    si = connect.SmartConnect(
-        host=VCENTER_HOSTNAME,
-        user=VCENTER_USERNAME,
-        pwd=VCENTER_PASSWORD,
-        sslContext=ssl_context,
-    )
-
-    try:
-        yield si
-    finally:
-        connect.Disconnect(si)
-
-
-@contextlib.contextmanager
-def datastore(si):
-    content = si.RetrieveContent()
-
-    for datacenter in content.viewManager.CreateContainerView(
-        content.rootFolder,
-        [vim.Datacenter],
-        True,
-    ).view:
-        if datacenter.name == VCENTER_DATACENTER:
-            break
-    else:
-        raise RuntimeError(f"Datacenter {VCENTER_DATACENTER} not found")
-
-    for host in content.viewManager.CreateContainerView(
-        content.rootFolder,
-        [vim.HostSystem],
-        True,
-    ).view:
-        if host.name == VCENTER_ESX_HOST:
-            break
-    else:
-        raise RuntimeError(f"ESX host {VCENTER_ESX_HOST} not found")
-
-    with dataset(f"vm_{random_string()}") as ds:
-        with nfs_share(ds) as share:
-            ssh(f"chmod 777 /mnt/{ds}")
-
-            datastore_name = random_string()
-
-            datastore = host.configManager.datastoreSystem.CreateNasDatastore(
-                vim.host.NasVolume.Specification(
-                    remoteHost=truenas_server.ip,
-                    remotePath=share["path"],
-                    localPath=datastore_name,
-                    accessMode=vim.host.MountInfo.AccessMode.readWrite,
-                    type=vim.host.FileSystemVolume.FileSystemType.NFS
-                )
-            )
-
-            try:
-                yield types.SimpleNamespace(
-                    datacenter=datacenter,
-                    host=host,
-                    name=datastore_name,
-                    dataset=ds,
-                )
-            finally:
-                VimTask.WaitForTask(datastore.Destroy_Task())
-
-
-@contextlib.contextmanager
-def vm(si, datastore):
-    content = si.RetrieveContent()
-
-    vm_name = random_string()
-
-    config = vim.vm.ConfigSpec()
-    config.memoryMB = 2048
-    config.guestId = "ubuntu64Guest"
-    config.name = vm_name
-    config.numCPUs = 1
-    config.files = vim.vm.FileInfo()
-    config.files.vmPathName = f"[{datastore.name}]"
-
-    VimTask.WaitForTask(datastore.datacenter.vmFolder.CreateVm(
-        config,
-        pool=datastore.host.parent.resourcePool,
-        host=datastore.host,
-    ))
-
-    for vm in content.viewManager.CreateContainerView(
-        content.rootFolder,
-        [vim.VirtualMachine],
-        True,
-    ).view:
-        if vm.name == vm_name:
-            break
-    else:
-        raise RuntimeError("Created VM not found")
-
-    try:
-        VimTask.WaitForTask(vm.PowerOn())
-
-        try:
-            yield vm_name
-        finally:
-            VimTask.WaitForTask(vm.PowerOff())
-    finally:
-        VimTask.WaitForTask(vm.Destroy_Task())
-
-
-def test_vmware():
-    with vcenter_connection() as si:
-        with datastore(si) as ds:
-            with vm(si, ds):
-                result = call(
-                    "vmware.match_datastores_with_datasets",
-                    {
-                        "hostname": VCENTER_HOSTNAME,
-                        "username": VCENTER_USERNAME,
-                        "password": VCENTER_PASSWORD,
-                    },
-                )
-                for rds in result["datastores"]:
-                    if (
-                        rds["name"] == ds.name and
-                        rds["description"] == f"NFS mount '/mnt/{ds.dataset}' on {truenas_server.ip}" and
-                        rds["filesystems"] == [ds.dataset]
-                    ):
-                        break
-                else:
-                    assert False, result
-
-                with vmware({
-                    "datastore": ds.name,
-                    "filesystem": ds.dataset,
-                    "hostname": VCENTER_HOSTNAME,
-                    "username": VCENTER_USERNAME,
-                    "password": VCENTER_PASSWORD,
-                }):
-                    with snapshot_task({
-                        "dataset": ds.dataset,
-                        "recursive": False,
-                        "lifetime_value": 1,
-                        "lifetime_unit": "DAY",
-                        "naming_schema": "%Y%m%d%H%M",
-                    }) as task:
-                        call("pool.snapshottask.run", task["id"])
-
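-                        # Poll for up to 60 seconds for the snapshot task to produce a
-                        # snapshot before checking the vmsynced property.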
-                        for i in range(60):
-                            time.sleep(1)
-                            snapshots = call("zfs.snapshot.query", [["dataset", "=", ds.dataset]])
-                            if snapshots:
-                                break
-                        else:
-                            assert False
-
-                        assert snapshots[0]["properties"]["freenas:vmsynced"]["value"] == "Y"
diff --git a/tests/api2/test_vmware_snapshot_delete.py b/tests/api2/test_vmware_snapshot_delete.py
deleted file mode 100644
index 3952e98e30fca..0000000000000
--- a/tests/api2/test_vmware_snapshot_delete.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import contextlib
-from datetime import datetime
-from unittest.mock import ANY
-
-from middlewared.test.integration.utils import call, mock
-
-
-@contextlib.contextmanager
-def pending_snapshot_delete(d):
-    psd = {
-        "vmware": {
-            "hostname": "host",
-            "username": "user",
-            "password": "pass",
-        },
-        "vm_uuid": "abcdef",
-        "snapshot_name": "snapshot",
-        "datetime": d,
-    }
-    psd["id"] = call("datastore.insert", "storage.vmwarependingsnapshotdelete", psd)
-    try:
-        yield psd
-    finally:
-        call("datastore.delete", "storage.vmwarependingsnapshotdelete", psd["id"])
-
-
-def test_success():
-    with pending_snapshot_delete(datetime(2100, 1, 1)):
-        with mock("vmware.connect", return_value=None):
-            with mock("vmware.find_vms_by_uuid", return_value=[None]):
-                with mock("vmware.delete_snapshot", return_value=None):
-                    with mock("vmware.disconnect", return_value=None):
-                        call("vmware.delete_pending_snapshots")
-
-                        assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == []
-
-
-def test_failure_1():
-    with pending_snapshot_delete(datetime(2100, 1, 1)):
-        with mock("vmware.connect", f"""
-            async def mock(self, *args):
-                raise Exception('Unknown error')
-        """):
-            call("vmware.delete_pending_snapshots")
-
-            assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == [ANY]
-
-
-def test_failure_2():
-    with pending_snapshot_delete(datetime(2100, 1, 1)):
-        with mock("vmware.connect", return_value=None):
-            with mock("vmware.find_vms_by_uuid", f"""
-                async def mock(self, *args):
-                    raise Exception('Unknown error')
-            """):
-                call("vmware.delete_pending_snapshots")
-
-                assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == [ANY]
-
-
-def test_failure_and_expiry():
-    with pending_snapshot_delete(datetime(2010, 1, 1)):
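-        # A timestamp far in the past means this pending delete has already expired,
-        # so it is purged even though the vCenter connection attempt keeps failing.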
-        with mock("vmware.connect", f"""
-            async def mock(self, *args):
-                raise Exception('Unknown error')
-        """):
-            call("vmware.delete_pending_snapshots")
-
-            assert call("datastore.query", "storage.vmwarependingsnapshotdelete") == []
diff --git a/tests/api2/test_vmware_state.py b/tests/api2/test_vmware_state.py
deleted file mode 100644
index fe293ccefc82a..0000000000000
--- a/tests/api2/test_vmware_state.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from unittest.mock import ANY
-
-from middlewared.test.integration.utils import call, mock
-
-
-def test_vmware_state_lifetime():
-    vmsnapobj = {
-        "hostname": "host",
-        "username": "user",
-        "password": "pass",
-    }
-    with mock("vmware.validate_data", return_value=None):
-        vmware = call("vmware.create", {
-            "datastore": "ds",
-            "filesystem": "fs",
-            **vmsnapobj,
-        })
-        try:
-            assert vmware["state"] == {"state": "PENDING"}
-
-            call("vmware.alert_vmware_login_failed", vmsnapobj, "Unknown error")
-            vmware = call("vmware.get_instance", vmware["id"])
-            assert vmware["state"] == {"state": "ERROR", "error": "Unknown error", "datetime": ANY}
-
-            call("vmware.delete_vmware_login_failed_alert", vmsnapobj)
-            vmware = call("vmware.get_instance", vmware["id"])
-            assert vmware["state"] == {"state": "SUCCESS", "datetime": ANY}
-
-            call("vmware.update", vmware["id"], {})
-            vmware = call("vmware.get_instance", vmware["id"])
-            assert vmware["state"] == {"state": "PENDING"}
-        finally:
-            call("vmware.delete", vmware["id"])
diff --git a/tests/api2/test_websocket_interface.py b/tests/api2/test_websocket_interface.py
deleted file mode 100644
index 9d980cc799f11..0000000000000
--- a/tests/api2/test_websocket_interface.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from auto_config import interface
-from middlewared.test.integration.utils import call
-
-
-def test_websocket_interface():
-    """This tests to ensure we return the interface name
-    by which the websocket connection has been established."""
-    assert call("interface.websocket_interface")["id"] == interface
diff --git a/tests/api2/test_websocket_local_ip.py b/tests/api2/test_websocket_local_ip.py
deleted file mode 100644
index aac486b1097e9..0000000000000
--- a/tests/api2/test_websocket_local_ip.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from middlewared.test.integration.utils import call
-from middlewared.test.integration.utils.client import truenas_server
-
-
-def test_websocket_local_ip():
-    """This tests to ensure we return the local IP address
-    of the TrueNAS system based on the websocket session."""
-    assert call("interface.websocket_local_ip") == truenas_server.ip
diff --git a/tests/api2/test_webui_crypto_service.py b/tests/api2/test_webui_crypto_service.py
deleted file mode 100644
index 1b2b49f9e805a..0000000000000
--- a/tests/api2/test_webui_crypto_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import errno
-import pytest
-
-from middlewared.service_exception import CallError
-from middlewared.test.integration.assets.account import unprivileged_user_client
-from middlewared.test.integration.utils import call
-
-
-@pytest.mark.parametrize('role,endpoint,valid_role', (
-    ('READONLY_ADMIN', 'webui.crypto.certificate_profiles', True),
-    ('READONLY_ADMIN', 'webui.crypto.certificateauthority_profiles', True),
-    ('NETWORK_INTERFACE_WRITE', 'webui.crypto.certificate_profiles', False),
-    ('NETWORK_INTERFACE_WRITE', 'webui.crypto.certificateauthority_profiles', False),
-))
-def test_ui_crypto_profiles_readonly_role(role, endpoint, valid_role):
-    with unprivileged_user_client(roles=[role]) as c:
-        if valid_role:
-            c.call(endpoint)
-        else:
-            with pytest.raises(CallError) as ve:
-                c.call(endpoint)
-
-            assert ve.value.errno == errno.EACCES
-            assert ve.value.errmsg == 'Not authorized'
-
-
-@pytest.mark.parametrize('role,valid_role', (
-    ('READONLY_ADMIN', True),
-    ('NETWORK_INTERFACE_WRITE', False),
-))
-def test_ui_crypto_domain_names_readonly_role(role, valid_role):
-    default_certificate = call('certificate.query', [('name', '=', 'truenas_default')])
-    if not default_certificate:
-        pytest.skip('Default certificate does not exist which is required for this test')
-    else:
-        default_certificate = default_certificate[0]
-
-    with unprivileged_user_client(roles=[role]) as c:
-        if valid_role:
-            c.call('webui.crypto.get_certificate_domain_names', default_certificate['id'])
-        else:
-            with pytest.raises(CallError) as ve:
-                c.call('webui.crypto.get_certificate_domain_names', default_certificate['id'])
-
-            assert ve.value.errno == errno.EACCES
-            assert ve.value.errmsg == 'Not authorized'
diff --git a/tests/api2/test_zfs_dataset_list.py b/tests/api2/test_zfs_dataset_list.py
deleted file mode 100644
index b9bab27ca8ca7..0000000000000
--- a/tests/api2/test_zfs_dataset_list.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import call, ssh
-
-
-def test__unlocked_zvols_fast__volmode():
-    with dataset("container") as container:
-        ssh(f"zfs set volmode=full {container}")
-
-        with dataset("container/zvol", {"type": "VOLUME", "volsize": 100 * 1024 * 1024}) as zvol:
-            ssh(f"sgdisk -n 1:1MiB:2MiB /dev/zvol/{zvol}")
-
-            call("zfs.dataset.unlocked_zvols_fast", [["name", "=", zvol]], {}, ["SIZE", "RO", "DEVID", "ATTACHMENT"])
diff --git a/tests/api2/test_zfs_snapshot_events.py b/tests/api2/test_zfs_snapshot_events.py
deleted file mode 100644
index 605b6a9af2c2e..0000000000000
--- a/tests/api2/test_zfs_snapshot_events.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import errno
-import pprint
-import pytest
-
-from unittest.mock import ANY
-
-from middlewared.service_exception import InstanceNotFound, ValidationErrors, ValidationError
-from middlewared.test.integration.assets.pool import dataset
-from middlewared.test.integration.utils import client
-
-
-def test_create():
-    with dataset("test_snapshot_events_create") as ds:
-        with client() as c:
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("zfs.snapshot.query", callback, sync=True)
-            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
-
-            assert len(events) == 1, pprint.pformat(events, indent=2)
-            assert events[0][0] == "ADDED"
-            assert events[0][1] == {"collection": "zfs.snapshot.query", "msg": "added", "id": f"{ds}@test",
-                                    "fields": ANY}
-
-
-def test_delete():
-    with dataset("test_snapshot_events_delete") as ds:
-        with client() as c:
-            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
-
-            events = []
-
-            def callback(type, **message):
-                events.append((type, message))
-
-            c.subscribe("zfs.snapshot.query", callback, sync=True)
-            c.call("zfs.snapshot.delete", f"{ds}@test")
-
-            assert len(events) == 1, pprint.pformat(events, indent=2)
-            assert events[0][0] == "REMOVED"
-            assert events[0][1] == {"collection": "zfs.snapshot.query", "msg": "removed", "id": f"{ds}@test",
-                                    "extra": {"recursive": False}}
-
-
-def test_delete_with_dependent_clone():
-    with dataset("test_snapshot_events_dependent_clone") as ds:
-        with client() as c:
-            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
-            c.call("zfs.snapshot.clone", {"snapshot": f"{ds}@test", "dataset_dst": f"{ds}/clone01"})
-
-            with pytest.raises(ValidationErrors) as ve:
-                c.call("zfs.snapshot.delete", f"{ds}@test")
-
-            assert ve.value.errors == [
-                ValidationError(
-                    "options.defer",
-                    f"Please set this attribute as '{ds}@test' snapshot has dependent clones: {ds}/clone01",
-                    errno.EINVAL
-                ),
-            ]
-
-
-def test_delete_nonexistent_snapshot():
-    with dataset("test_snapshot_events_nonexistent_snapshot") as ds:
-        with client() as c:
-            c.call("zfs.snapshot.create", {"dataset": ds, "name": "test"})
-
-            with pytest.raises(InstanceNotFound) as e:
-                c.call("zfs.snapshot.delete", f"{ds}@testing")
-
-            assert str(e.value) == f"[ENOENT] None: Snapshot {ds}@testing not found"
diff --git a/tests/api2/test_zfs_snapshot_hold.py b/tests/api2/test_zfs_snapshot_hold.py
deleted file mode 100644
index d73e2f606bacc..0000000000000
--- a/tests/api2/test_zfs_snapshot_hold.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from unittest.mock import ANY
-
-from middlewared.test.integration.assets.pool import dataset, snapshot
-from middlewared.test.integration.utils import call
-
-
-def test_normal_snapshot():
-    with dataset("test_normal_hold") as ds:
-        with snapshot(ds, "test") as id:
-            assert call("zfs.snapshot.get_instance", id, {"extra": {"holds": True}})["holds"] == {}
-
-
-def test_held_snapshot():
-    with dataset("test_held_snapshot") as ds:
-        with snapshot(ds, "test") as id:
-            call("zfs.snapshot.hold", id)
-
-            assert call("zfs.snapshot.get_instance", id, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
-
-            call("zfs.snapshot.release", id)  # Otherwise the whole test tree won't be deleted
-
-
-def test_held_snapshot_tree():
-    with dataset("test_snapshot_tree") as ds:
-        with dataset("test_snapshot_tree/child") as ds2:
-            with snapshot(ds, "test", recursive=True) as id:
-                id2 = f"{ds2}@test"
-
-                call("zfs.snapshot.hold", id, {"recursive": True})
-
-                assert call("zfs.snapshot.get_instance", id, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
-                assert call("zfs.snapshot.get_instance", id2, {"extra": {"holds": True}})["holds"] == {"truenas": ANY}
-
-                call("zfs.snapshot.release", id, {"recursive": True})  # Otherwise the whole test tree won't be deleted
diff --git a/tests/api2/test_zpool_capacity_alert.py b/tests/api2/test_zpool_capacity_alert.py
deleted file mode 100644
index 2d8cd7e8dcf29..0000000000000
--- a/tests/api2/test_zpool_capacity_alert.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import pytest
-from pytest_dependency import depends
-from middlewared.service_exception import CallError
-from middlewared.test.integration.utils import call, mock, pool
-
-
-def test__does_not_emit_alert(request):
-    with mock("zfs.pool.query", return_value=[
-        {
-            "name": pool,
-            "properties": {
-                "capacity": {
-                    "parsed": "50",
-                }
-            },
-        }
-    ]):
-        assert call("alert.run_source", "ZpoolCapacity") == []
-
-
-def test__emits_alert(request):
-    with mock("zfs.pool.query", return_value=[
-        {
-            "name": pool,
-            "properties": {
-                "capacity": {
-                    "parsed": "85",
-                }
-            },
-        }
-    ]):
-        alerts = call("alert.run_source", "ZpoolCapacity")
-        assert len(alerts) == 1
-        assert alerts[0]["klass"] == "ZpoolCapacityWarning"
-        assert alerts[0]["key"] == f'["{pool}"]'
-        assert alerts[0]["args"] == {"volume": pool, "capacity": 85}
-
-
-def test__does_not_flap_alert(request):
-    with mock("zfs.pool.query", return_value=[
-        {
-            "name": pool,
-            "properties": {
-                "capacity": {
-                    "parsed": "79",
-                }
-            },
-        }
-    ]):
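-        # 79% presumably sits between the clear and warning thresholds, so the source
-        # declines to report (EALERTCHECKERUNAVAILABLE) rather than raise or clear an
-        # alert, which is what keeps an existing capacity alert from flapping.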
-        with pytest.raises(CallError) as e:
-            call("alert.run_source", "ZpoolCapacity")
-
-        assert e.value.errno == CallError.EALERTCHECKERUNAVAILABLE
diff --git a/tests/api2/test_zpool_status.py b/tests/api2/test_zpool_status.py
deleted file mode 100644
index 9ca5e462ff4ee..0000000000000
--- a/tests/api2/test_zpool_status.py
+++ /dev/null
@@ -1,266 +0,0 @@
-import os
-
-import pytest
-
-from middlewared.test.integration.assets.pool import another_pool
-from middlewared.test.integration.utils import call, ssh
-
-
-POOL_NAME = 'test_format_pool'
-ZFS_PART_UUID = '6a898cc3-1dd2-11b2-99a6-080020736631'
-
-
-def get_disk_uuid_mapping(unused_disks):
-    disk_uuid = {}
-    for disk in filter(
-        lambda n: n['name'] in unused_disks and n['parts'], call('device.get_disks', True, False).values()
-    ):
-        if partition := next((part for part in disk['parts'] if part['partition_type'] == ZFS_PART_UUID), None):
-            disk_uuid[disk['name']] = os.path.join('/dev/disk/by-partuuid', partition['partition_uuid'])
-
-    return disk_uuid
-
-
-def get_pool_status(unused_disks, real_paths=False, replaced=False):
-    disk_uuid_mapping = get_disk_uuid_mapping(unused_disks)
-    return {
-        'disks': {
-            f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'AVAIL' if not replaced else 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe' if not replaced else 'spare-0',
-                'vdev_type': 'spares' if not replaced else 'data',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                ] if not replaced else [
-                    f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}',
-                    f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                ]
-            },
-            f'{disk_uuid_mapping[unused_disks[3]] if not real_paths else unused_disks[3]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe',
-                'vdev_type': 'logs',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[3]] if not real_paths else unused_disks[3]}'
-                ]
-            },
-            f'{disk_uuid_mapping[unused_disks[2]] if not real_paths else unused_disks[2]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe',
-                'vdev_type': 'dedup',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[2]] if not real_paths else unused_disks[2]}'
-                ]
-            },
-            f'{disk_uuid_mapping[unused_disks[5]] if not real_paths else unused_disks[5]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe',
-                'vdev_type': 'special',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[5]] if not real_paths else unused_disks[5]}'
-                ]
-            },
-            f'{disk_uuid_mapping[unused_disks[0]] if not real_paths else unused_disks[0]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe',
-                'vdev_type': 'l2cache',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[0]] if not real_paths else unused_disks[0]}'
-                ]
-            },
-            f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}': {
-                'pool_name': POOL_NAME,
-                'disk_status': 'ONLINE',
-                'disk_read_errors': 0,
-                'disk_write_errors': 0,
-                'disk_checksum_errors': 0,
-                'vdev_name': 'stripe' if not replaced else 'spare-0',
-                'vdev_type': 'data',
-                'vdev_disks': [
-                    f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}'
-                ] if not replaced else [
-                    f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}',
-                    f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                ]
-            }
-        },
-        POOL_NAME: {
-            'spares': {
-                f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'AVAIL' if not replaced else 'INUSE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'spares',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                    ]
-                }
-            },
-            'logs': {
-                f'{disk_uuid_mapping[unused_disks[3]] if not real_paths else unused_disks[3]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'logs',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[3]] if not real_paths else unused_disks[3]}'
-                    ]
-                }
-            },
-            'dedup': {
-                f'{disk_uuid_mapping[unused_disks[2]] if not real_paths else unused_disks[2]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'dedup',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[2]] if not real_paths else unused_disks[2]}'
-                    ]
-                }
-            },
-            'special': {
-                f'{disk_uuid_mapping[unused_disks[5]] if not real_paths else unused_disks[5]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'special',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[5]] if not real_paths else unused_disks[5]}'
-                    ]
-                }
-            },
-            'l2cache': {
-                f'{disk_uuid_mapping[unused_disks[0]] if not real_paths else unused_disks[0]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'l2cache',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[0]] if not real_paths else unused_disks[0]}'
-                    ]
-                }
-            },
-            'data': {
-                f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'stripe',
-                    'vdev_type': 'data',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}'
-                    ]
-                }
-            } if not replaced else {
-                f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'spare-0',
-                    'vdev_type': 'data',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}',
-                        f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                    ]
-                },
-                f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}': {
-                    'pool_name': POOL_NAME,
-                    'disk_status': 'ONLINE',
-                    'disk_read_errors': 0,
-                    'disk_write_errors': 0,
-                    'disk_checksum_errors': 0,
-                    'vdev_name': 'spare-0',
-                    'vdev_type': 'data',
-                    'vdev_disks': [
-                        f'{disk_uuid_mapping[unused_disks[1]] if not real_paths else unused_disks[1]}',
-                        f'{disk_uuid_mapping[unused_disks[4]] if not real_paths else unused_disks[4]}'
-                    ]
-                },
-            }
-        }
-    }
-
-
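-# Temporary pool with one disk in every supported vdev class so that the
-# structure reported by zpool.status can be checked for each of them.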
-@pytest.fixture(scope='module')
-def test_pool():
-    unused_disks = call('disk.get_unused')
-    if len(unused_disks) < 7:
-        pytest.skip('Insufficient number of disks to perform these tests')
-
-    with another_pool({
-        'name': POOL_NAME,
-        'topology': {
-            'cache': [{'type': 'STRIPE', 'disks': [unused_disks[0]['name']]}],
-            'data': [{'type': 'STRIPE', 'disks': [unused_disks[1]['name']]}],
-            'dedup': [{'type': 'STRIPE', 'disks': [unused_disks[2]['name']]}],
-            'log': [{'type': 'STRIPE', 'disks': [unused_disks[3]['name']]}],
-            'spares': [unused_disks[4]['name']],
-            'special': [{'type': 'STRIPE', 'disks': [unused_disks[5]['name']]}]
-        },
-        'allow_duplicate_serials': True,
-    }) as pool_info:
-        yield pool_info, unused_disks
-
-
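-# Compare zpool.status output against the expected structure, both with and
-# without real device paths.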
-@pytest.mark.parametrize('real_path', [True, False])
-def test_zpool_status_format(test_pool, real_path):
-    assert call('zpool.status', {'name': POOL_NAME, 'real_paths': real_path}) == get_pool_status(
-        [disk['name'] for disk in test_pool[1]], real_path
-    )
-
-
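-# Replace the data disk with the hot spare and verify that zpool.status
-# reports the resulting spare-0 vdev.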
-def test_replaced_disk_zpool_status_format(test_pool):
-    disk_mapping = get_disk_uuid_mapping([disk['name'] for disk in test_pool[1]])
-    data_disk = test_pool[1][1]['name']
-    spare_disk = test_pool[1][4]['name']
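-    # Kick off the replacement via the CLI; the spare should transition to INUSE.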
-    ssh(
-        f'zpool replace '
-        f'{test_pool[0]["name"]} '
-        f'{os.path.basename(disk_mapping[data_disk])} '
-        f'{os.path.basename(disk_mapping[spare_disk])}',
-    )
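-    # Both path representations should now reflect the spare-0 layout.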
-    for real_path in (True, False):
-        assert call(
-            'zpool.status', {'name': POOL_NAME, 'real_paths': real_path}
-        ) == get_pool_status(
-            [disk['name'] for disk in test_pool[1]], real_path, True
-        )