diff --git a/src/middlewared/middlewared/api/v25_04_0/user.py b/src/middlewared/middlewared/api/v25_04_0/user.py index 2da4fb7aff5bb..ff72d2c34fe50 100644 --- a/src/middlewared/middlewared/api/v25_04_0/user.py +++ b/src/middlewared/middlewared/api/v25_04_0/user.py @@ -40,7 +40,6 @@ class UserEntry(BaseModel): local: bool immutable: bool twofactor_auth_configured: bool - nt_name: str | None sid: str | None roles: list[str] @@ -54,7 +53,6 @@ class UserCreate(UserEntry): local: Excluded = excluded_field() immutable: Excluded = excluded_field() twofactor_auth_configured: Excluded = excluded_field() - nt_name: Excluded = excluded_field() sid: Excluded = excluded_field() roles: Excluded = excluded_field() diff --git a/src/middlewared/middlewared/plugins/account.py b/src/middlewared/middlewared/plugins/account.py index fb98257e2341b..549fa4f326b38 100644 --- a/src/middlewared/middlewared/plugins/account.py +++ b/src/middlewared/middlewared/plugins/account.py @@ -30,12 +30,14 @@ from middlewared.utils.privilege import credential_has_full_admin, privileges_group_mapping from middlewared.validators import Range from middlewared.async_validators import check_path_resides_within_volume +from middlewared.utils.sid import db_id_to_rid, DomainRid from middlewared.plugins.account_.constants import ( ADMIN_UID, ADMIN_GID, SKEL_PATH, DEFAULT_HOME_PATH, DEFAULT_HOME_PATHS ) from middlewared.plugins.smb_.constants import SMBBuiltin from middlewared.plugins.idmap_.idmap_constants import ( BASE_SYNTHETIC_DATASTORE_ID, + IDType, TRUENAS_IDMAP_DEFAULT_LOW, SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX @@ -203,6 +205,7 @@ async def user_extend_context(self, rows, extra): memberships[uid] = [i['group']['id']] return { + 'server_sid': await self.middleware.call('smb.local_server_sid'), 'memberships': memberships, 'user_2fa_mapping': ({ entry['user']['id']: bool(entry['secret']) for entry in await self.middleware.call( @@ -239,11 +242,15 @@ async def user_extend(self, user, ctx): user_roles |= set(entry) + if user['smb']: + sid = f'{ctx["server_sid"]}-{db_id_to_rid(IDType.USER, user["id"])}' + else: + sid = None + user.update({ 'local': True, 'id_type_both': False, - 'nt_name': None, - 'sid': None, + 'sid': sid, 'roles': list(user_roles) }) return user @@ -253,7 +260,6 @@ def user_compress(self, user): to_remove = [ 'local', 'id_type_both', - 'nt_name', 'sid', 'immutable', 'home_create', @@ -268,15 +274,10 @@ def user_compress(self, user): async def query(self, filters, options): """ - Query users with `query-filters` and `query-options`. As a performance optimization, only local users - will be queried by default. + Query users with `query-filters` and `query-options`. - Expanded information may be requested by specifying the extra option - `"extra": {"additional_information": []}`. - - The following `additional_information` options are supported: - `SMB` - include Windows SID and NT Name for user. If this option is not specified, then these - keys will have `null` value. + If users provided by Active Directory or LDAP are not desired, then + a "local", "=", False should be added to filters. 
""" ds_users = [] options = options or {} @@ -291,27 +292,11 @@ async def query(self, filters, options): datastore_options.pop('offset', None) datastore_options.pop('select', None) - extra = options.get('extra', {}) - additional_information = extra.get('additional_information', []) - - username_sid = {} - if 'SMB' in additional_information: - try: - for u in await self.middleware.call("smb.passdb_list", True): - username_sid.update({u['Unix username']: { - 'nt_name': u['NT username'], - 'sid': u['User SID'], - }}) - except Exception: - # Failure to retrieve passdb list often means that system dataset is - # broken - self.logger.error('Failed to retrieve passdb information', exc_info=True) - if filters_include_ds_accounts(filters): ds = await self.middleware.call('directoryservices.status') if ds['type'] is not None and ds['status'] == DSStatus.HEALTHY.name: ds_users = await self.middleware.call( - 'directoryservices.cache.query', 'USER', filters, options.copy() + 'directoryservices.cache.query', 'USER', filters, options.copy() ) match DSType(ds['type']): @@ -330,17 +315,6 @@ async def query(self, filters, options): 'datastore.query', self._config.datastore, [], datastore_options ) - if username_sid: - for entry in result: - smb_entry = username_sid.get(entry['username'], { - 'nt_name': '', - 'sid': '', - }) - if smb_entry['sid']: - smb_entry['nt_name'] = smb_entry['nt_name'] or entry['username'] - - entry.update(smb_entry) - return await self.middleware.run_in_thread( filter_list, result + ds_users, filters, options ) @@ -920,7 +894,6 @@ def do_delete(self, audit_callback, pk, options): '(LDAP server or domain controller).', errno.EPERM ) - user = self.middleware.call_sync('user.get_instance', pk) audit_callback(user['username']) @@ -1770,7 +1743,9 @@ async def group_extend_context(self, rows, extra): else: mem[gid] = [uid] - return {"memberships": mem, "privileges": privileges} + server_sid = await self.middleware.call('smb.local_server_sid') + + return {"memberships": mem, "privileges": privileges, "server_sid": server_sid} @private async def group_extend(self, group, ctx): @@ -1783,11 +1758,21 @@ async def group_extend(self, group, ctx): if {'method': '*', 'resource': '*'} in privilege_mappings['allowlist']: privilege_mappings['roles'].append('FULL_ADMIN') + match group['group']: + case 'builtin_administrators': + sid = f'{ctx["server_sid"]}-{DomainRid.ADMINS}' + case 'builtin_guests': + sid = f'{ctx["server_sid"]}-{DomainRid.GUESTS}' + case _: + if group['smb']: + sid = f'{ctx["server_sid"]}-{db_id_to_rid(IDType.GROUP, group["id"])}' + else: + sid = None + group.update({ 'local': True, 'id_type_both': False, - 'nt_name': None, - 'sid': None, + 'sid': sid, 'roles': privilege_mappings['roles'] }) return group @@ -1798,7 +1783,6 @@ async def group_compress(self, group): 'name', 'local', 'id_type_both', - 'nt_name', 'sid', 'roles' ] @@ -1811,14 +1795,7 @@ async def group_compress(self, group): @filterable async def query(self, filters, options): """ - Query groups with `query-filters` and `query-options`. As a performance optimization, only local groups - will be queried by default. - - Expanded information may be requested by specifying the extra option `"extra": {"additional_information": []}`. - - The following `additional_information` options are supported: - `SMB` - include Windows SID and NT Name for group. If this option is not specified, then these - keys will have `null` value. + Query groups with `query-filters` and `query-options`. 
""" ds_groups = [] options = options or {} @@ -1833,41 +1810,15 @@ async def query(self, filters, options): datastore_options.pop('offset', None) datastore_options.pop('select', None) - extra = options.get('extra', {}) - additional_information = extra.get('additional_information', []) - if filters_include_ds_accounts(filters): ds = await self.middleware.call('directoryservices.status') if ds['type'] is not None and ds['status'] == DSStatus.HEALTHY.name: ds_groups = await self.middleware.call('directoryservices.cache.query', 'GROUP', filters, options) - if 'SMB' in additional_information: - try: - smb_groupmap = await self.middleware.call("smb.groupmap_list") - except Exception: - # If system dataset has failed to properly initialize / is broken - # then looking up groupmaps will fail. - self.logger.error('Failed to retrieve SMB groupmap.', exc_info=True) - smb_groupmap = { - 'local': {}, - 'local_builtins': {} - } - result = await self.middleware.call( 'datastore.query', self._config.datastore, [], datastore_options ) - if 'SMB' in additional_information: - for entry in result: - smb_data = smb_groupmap['local'].get(entry['gid']) - if not smb_data: - smb_data = smb_groupmap['local_builtins'].get(entry['gid'], {'nt_name': '', 'sid': ''}) - - entry.update({ - 'nt_name': smb_data['nt_name'], - 'sid': smb_data['sid'], - }) - return await self.middleware.run_in_thread( filter_list, result + ds_groups, filters, options ) @@ -1925,8 +1876,7 @@ async def create_internal(self, data, reload_users=True): await self.middleware.call('service.reload', 'user') if data['smb']: - gm_job = await self.middleware.call('smb.synchronize_group_mappings') - await gm_job.wait() + await self.middleware.call_sync('smb.add_groupmap', group) return pk @@ -1957,7 +1907,6 @@ async def do_update(self, audit_callback, pk, data): except KeyError: groupname = 'UNKNOWN' - audit_callback(groupname) raise CallError( 'Groups provided by a directory service must be modified through the identity provider ' @@ -1967,8 +1916,6 @@ async def do_update(self, audit_callback, pk, data): group = await self.get_instance(pk) audit_callback(group['name']) - groupmap_changed = False - if data.get('gid') == group['gid']: data.pop('gid') # Only check for duplicate GID if we are updating it @@ -1984,13 +1931,15 @@ async def do_update(self, audit_callback, pk, data): if 'name' in data and data['name'] != group['group']: group['group'] = group.pop('name') if new_smb: - groupmap_changed = True + # group renamed. 
We can simply add over top since group_mapping.tdb is keyed + # by SID value + await self.middleware.call_sync('smb.add_groupmap', group) else: group.pop('name', None) if new_smb and not old_smb: - groupmap_changed = True + await self.middleware.call_sync('smb.add_groupmap', group) elif old_smb and not new_smb: - groupmap_changed = True + await self.middleware.call_sync('smb.del_groupmap', group['id']) group = await self.group_compress(group) await self.middleware.call('datastore.update', 'account.bsdgroups', pk, group, {'prefix': 'bsdgrp_'}) @@ -2025,11 +1974,6 @@ async def do_update(self, audit_callback, pk, data): ) await self.middleware.call('service.reload', 'user') - - if groupmap_changed: - gm_job = await self.middleware.call('smb.synchronize_group_mappings') - await gm_job.wait() - return pk @accepts(Int('id'), Dict('options', Bool('delete_users', default=False)), audit='Delete group', audit_callback=True) @@ -2080,8 +2024,7 @@ async def do_delete(self, audit_callback, pk, options): await self.middleware.call('datastore.delete', 'account.bsdgroups', pk) if group['smb']: - gm_job = await self.middleware.call('smb.synchronize_group_mappings') - await gm_job.wait() + await self.middleware.call('smb.del_groupmap', group['id']) await self.middleware.call('service.reload', 'user') try: @@ -2100,13 +2043,12 @@ async def get_next_gid(self): """ Get the next available/free gid. """ - used_gids = ( - { - group['bsdgrp_gid'] - for group in await self.middleware.call('datastore.query', 'account.bsdgroups') - } | - set((await self.middleware.call('privilege.used_local_gids')).keys()) - ) + used_gids = { + group['bsdgrp_gid'] + for group in await self.middleware.call('datastore.query', 'account.bsdgroups') + } + used_gids |= set((await self.middleware.call('privilege.used_local_gids')).keys()) + # We should start gid from 3000 to avoid potential conflicts - Reference: NAS-117892 next_gid = 3000 while next_gid in used_gids: diff --git a/src/middlewared/middlewared/plugins/directoryservices_/util_cache.py b/src/middlewared/middlewared/plugins/directoryservices_/util_cache.py index b5f7d34f62910..18e9878baebb4 100644 --- a/src/middlewared/middlewared/plugins/directoryservices_/util_cache.py +++ b/src/middlewared/middlewared/plugins/directoryservices_/util_cache.py @@ -264,7 +264,6 @@ def fill_cache( 'twofactor_auth_configured': False, 'local': False, 'id_type_both': id_type_both, - 'nt_name': user_data.pw_name, 'smb': u['sid'] is not None, 'sid': u['sid'], 'roles': [] @@ -303,7 +302,6 @@ def fill_cache( 'users': [], 'local': False, 'id_type_both': id_type_both, - 'nt_name': group_data.gr_name, 'smb': g['sid'] is not None, 'sid': g['sid'], 'roles': [] diff --git a/src/middlewared/middlewared/plugins/idmap.py b/src/middlewared/middlewared/plugins/idmap.py index 03f5e985132d3..2190b21770cdb 100644 --- a/src/middlewared/middlewared/plugins/idmap.py +++ b/src/middlewared/middlewared/plugins/idmap.py @@ -15,6 +15,11 @@ from middlewared.plugins.idmap_.idmap_sss import SSSClient import middlewared.sqlalchemy as sa from middlewared.utils import filter_list +from middlewared.utils.sid import ( + get_domain_rid, + BASE_RID_GROUP, + BASE_RID_USER, +) from middlewared.utils.tdb import ( get_tdb_handle, TDBDataType, @@ -30,7 +35,6 @@ WINBIND_IDMAP_FILE = '/var/run/samba-lock/gencache.tdb' WINBIND_IDMAP_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES) - def clear_winbind_cache(): with get_tdb_handle(WINBIND_IDMAP_FILE, WINBIND_IDMAP_TDB_OPTIONS) as hdl: return hdl.clear() @@ -954,11 +958,14 @@ def 
convert_sids(self, sidlist): unmapped = {} to_check = [] + server_sid = self.middleware.call_sync('smb.local_server_sid') + netbiosname = self.middleware.call_sync('smb.config')['netbiosname'] + for sid in sidlist: try: - entry = self.__unixsid_to_name(sid, client.separator) - except KeyError: - # This is a Unix Sid, but account doesn't exist + entry = self.__local_sid_to_entry(server_sid, netbiosname, sid, client.separator) + except (KeyError, ValidationErrors): + # This is a Unix SID or a local SID, but account doesn't exist unmapped.update({sid: sid}) continue @@ -1044,7 +1051,7 @@ def convert_unixids(self, id_list): return output - def __unixsid_to_name(self, sid, separator='\\'): + def __unixsid_to_entry(self, sid, separator): if not sid.startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)): return None @@ -1067,6 +1074,40 @@ def __unixsid_to_name(self, sid, separator='\\'): 'sid': sid } + def __local_sid_to_entry(self, server_sid, netbiosname, sid, separator): + if sid.startswith((SID_LOCAL_USER_PREFIX, SID_LOCAL_GROUP_PREFIX)): + return self.__unixsid_to_entry(sid) + + if not sid.startswith(server_sid): + return None + + rid = get_domain_rid(sid) + if rid > BASE_RID_GROUP: + id_type = IDType.GROUP.name + method = 'group.get_instance' + xid_key = 'gid' + name_key = 'name' + db_id = rid - BASE_RID_GROUP + elif rid > BASE_RID_USER: + id_type = IDType.USER.name + method = 'user.get_instance' + xid_key = 'uid' + name_key = 'username' + db_id = rid - BASE_RID_USER + else: + self.logger.warning('%s: unexpected local SID value', sid) + return None + + entry = self.middleware.call_sync(method, db_id) + + return { + 'name': f'{netbiosname}{separator}{entry[name_key]}', + 'id': entry[xid_key], + 'id_type': id_type, + 'sid': sid + } + + @private @filterable async def builtins(self, filters, options): diff --git a/src/middlewared/middlewared/plugins/smb.py b/src/middlewared/middlewared/plugins/smb.py index 5218f32d528f4..8e2d772e63af8 100644 --- a/src/middlewared/middlewared/plugins/smb.py +++ b/src/middlewared/middlewared/plugins/smb.py @@ -368,7 +368,7 @@ async def configure(self, job, create_paths=True): self.logger.warning("Failed to set immutable flag on /var/empty", exc_info=True) job.set_progress(30, 'Setting up server SID.') - await self.middleware.call('smb.set_sid', data['cifs_SID']) + await self.middleware.call('smb.set_system_sid') """ If the ldap passdb backend is being used, then the remote LDAP server @@ -697,11 +697,8 @@ async def do_update(self, app, data): await self.apply_aapl_changes() if old['netbiosname_local'] != new_config['netbiosname_local']: - new_sid = await self.middleware.call("smb.get_system_sid") - await self.middleware.call("smb.set_database_sid", new_sid) - new_config["cifs_SID"] = new_sid + await self.middleware.call('smb.set_system_sid') await self.middleware.call('idmap.gencache.flush') - await self.middleware.call("smb.synchronize_group_mappings") srv = (await self.middleware.call("network.configuration.config"))["service_announcement"] await self.middleware.call("network.configuration.toggle_announcement", srv) diff --git a/src/middlewared/middlewared/plugins/smb_/constants.py b/src/middlewared/middlewared/plugins/smb_/constants.py index 649482ad25e8e..7282a48f7f865 100644 --- a/src/middlewared/middlewared/plugins/smb_/constants.py +++ b/src/middlewared/middlewared/plugins/smb_/constants.py @@ -52,6 +52,18 @@ class SMBBuiltin(enum.Enum): GUESTS = ('builtin_guests', 'S-1-5-32-546') USERS = ('builtin_users', 'S-1-5-32-545') + @property + def 
nt_name(self): + return self.value[0][8:].capitalize() + + @property + def sid(self): + return self.value[1] + + @property + def rid(self): + return int(self.value[1].split('-')[-1]) + def unix_groups(): return [x.value[0] for x in SMBBuiltin] diff --git a/src/middlewared/middlewared/plugins/smb_/groupmap.py b/src/middlewared/middlewared/plugins/smb_/groupmap.py index 09d6fe806d16b..175325f487b09 100644 --- a/src/middlewared/middlewared/plugins/smb_/groupmap.py +++ b/src/middlewared/middlewared/plugins/smb_/groupmap.py @@ -1,20 +1,45 @@ -from middlewared.service import Service, job, private -from middlewared.service_exception import CallError -from middlewared.utils import run -from middlewared.plugins.smb import SMBCmd, SMBBuiltin, SMBPath - import os -import json import tdb import struct -# This follows JSON output version for net_groupmap.c -# Output format may change between this and final version accepted -# upstream, but Samba project has standardized on following version format -GROUPMAP_JSON_VERSION = {"major": 0, "minor": 1} -WINBINDD_AUTO_ALLOCATED = ['S-1-5-32-544', 'S-1-5-32-545', 'S-1-5-32-546'] +from middlewared.service import Service, job, private +from middlewared.service_exception import CallError +from middlewared.utils.sid import ( + BASE_RID_USER, + db_id_to_rid, + get_domain_rid, + lsa_sidtype, + DomainRid +) +from middlewared.utils.tdb import ( + get_tdb_handle, + TDBDataType, + TDBPathType, + TDBOptions, +) +from middlewared.plugins.idmap_.idmap_constants import IDType +from middlewared.plugins.smb_.constants import SMBBuiltin, SMBPath +from middlewared.plugins.smb_.util_groupmap import ( + delete_groupmap_entry, + insert_groupmap_entries, + query_groupmap_entries, + GroupmapFile, + GroupmapEntryType, + SMBGroupMap, + SMBGroupMembership, +) + +WINBINDD_AUTO_ALLOCATED = ('S-1-5-32-544', 'S-1-5-32-545', 'S-1-5-32-546') WINBINDD_WELL_KNOWN_PADDING = 100 +WINBIND_IDMAP_CACHE = f'{SMBPath.CACHE_DIR.platform()}/winbindd_cache.tdb' +WINBIND_IDMAP_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES) + + +def clear_winbind_idmap_cache(): + with get_tdb_handle(WINBIND_IDMAP_CACHE, WINBIND_IDMAP_TDB_OPTIONS) as hdl: + return hdl.clear() + class SMBService(Service): @@ -23,97 +48,30 @@ class Config: service_verb = 'restart' @private - async def json_check_version(self, version): - if version == GROUPMAP_JSON_VERSION: - return - - raise CallError( - "Unexpected JSON version returned from Samba utils: " - f"[{version}]. Expected version was: [{GROUPMAP_JSON_VERSION}]. " - "Behavior is undefined with a version mismatch and so refusing " - "to perform groupmap operation. Please file a bug report at " - "jira.ixsystems.com with this traceback." 
+ def add_groupmap(self, group): + server_sid = self.middleware.call_sync('smb.local_server_sid') + rid = db_id_to_rid(IDType.GROUP, group['id']) + entry = SMBGroupMap( + sid=f'{server_sid}-{rid}', + gid=group['gid'], + sid_type=lsa_sidtype.ALIAS, + name=group['group'], + comment='' ) + insert_groupmap_entries(GroupmapFile.DEFAULT, [entry]) @private - async def groupmap_listmem(self, sid): - payload = json.dumps({"alias": sid}) - lm = await run([ - SMBCmd.NET.value, "--json", "groupmap", "listmem", payload - ], check=False) - - # Command will return ENOENT when fails with STATUS_NO_SUCH_ALIAS - if lm.returncode == 2: - return [] - elif lm.returncode != 0: - raise CallError(f"Failed to list membership of alias [{sid}]: " - f"{lm.stderr.decode()}") - - output = json.loads(lm.stdout.decode()) - await self.json_check_version(output['version']) - - return [x["sid"] for x in output['members']] - - @private - async def groupmap_addmem(self, alias, member): - payload = f'data={json.dumps({"alias": alias, "member": member})}' - am = await run([ - SMBCmd.NET.value, "--json", "groupmap", "addmem", payload, - ], check=False) - if am.returncode != 0: - raise CallError( - f"Failed to add [{member}] to [{alias}]: {am.stderr.decode()}" - ) - - @private - async def diff_membership(self, actual, expected): - """ - Generate a diff between expected members of an alias vs - actual members. This is used for batch operation to add - or remove memberships. Since these memberships affect - how nss_winbind generates passwd entries, and also rights - evaluation in samba (for instance when a non-owner tries - to change ownership of a file), it is important that - we have no unexpected entries here. - """ - out = {"ADDMEM": [], "DELMEM": []} - - actual_set = set(actual) - expected_set = set(expected) - - out["ADDMEM"] = [{"sid": x} for x in expected_set - actual_set] - out["DELMEM"] = [{"sid": x} for x in actual_set - expected_set] - - return out - - @private - async def update_payload_with_diff(self, payload, alias, diff, ad): - async def add_to_payload(payload, alias, key, members): - idx = next((i for i, x in enumerate(payload[key]) if x["alias"] == alias), None) - if not idx: - payload[key].append({ - "alias": alias, - "members": members, - }) - else: - payload[key][idx]["members"].append(members) - - if diff.get("ADDMEM"): - await add_to_payload(payload, alias, "ADDMEM", diff["ADDMEM"]) - - """ - If AD is FAULTED or in process of joining or leaving AD, - then we may not have an accurate picture of what should be - in the alias member list. In this case, defer member removal - until next groupmap synchronization. - """ - if ad in ["HEALTHY", "DISABLED"] and diff.get("DELMEM"): - await add_to_payload(payload, alias, "DELMEM", diff["DELMEM"]) - - return + def del_groupmap(self, db_id): + server_sid = self.middleware.call_sync('smb.local_server_sid') + rid = db_id_to_rid(IDType.GROUP, db_id) + delete_groupmap_entry( + GroupmapFile.DEFAULT, + GroupmapEntryType.GROUP_MAPPING, + entry_sid=f'{server_sid}-{rid}', + ) @private - async def sync_foreign_groups(self): + def sync_foreign_groups(self): """ Domain Users, and Domain Admins must have S-1-5-32-545 and S-1-5-32-544 added to their respective Unix tokens for correct behavior in AD domain. @@ -122,66 +80,44 @@ async def sync_foreign_groups(self): when newly creating these groups (if they don't exist), but can get lost, resulting in unexpected / erratic permissions behavior. 
""" - domain_sid = None - payload = {"ADDMEM": [], "DELMEM": []} # second groupmap listing is to ensure we have accurate / current info. - groupmap = await self.groupmap_list() - admin_group = (await self.middleware.call('smb.config'))['admin_group'] + entries = [] - ad_state = await self.middleware.call('activedirectory.get_state') - if ad_state == 'HEALTHY': - try: - domain_info = await self.middleware.call('idmap.domain_info', - 'DS_TYPE_ACTIVEDIRECTORY') - domain_sid = domain_info['sid'] - except Exception: - self.logger.warning('Failed to retrieve idmap domain info', exc_info=True) + groupmap = self.groupmap_list() + localsid = groupmap['localsid'] - """ - Administrators should only have local and domain admins, and a user- - designated "admin group" (if specified). - """ - admins = await self.groupmap_listmem("S-1-5-32-544") - expected = [groupmap['local_builtins'][544]['sid']] - if domain_sid: - expected.append(f'{domain_sid}-512') - - if admin_group: - admin_sid = None - grp_obj = await self.middleware.call( - 'group.query', - [('group', '=', admin_group), ('local', '=', True)], - {'extra': {'additional_information': ['SMB', 'DS']}} - ) - if grp_obj: - admin_sid = grp_obj[0]['sid'] - - if admin_sid: - expected.append(admin_sid) - - diff = await self.diff_membership(admins, expected) - await self.update_payload_with_diff(payload, "S-1-5-32-544", diff, ad_state) + admins = [f'{localsid}-{DomainRid.ADMINS}'] + guests = [f'{localsid}-{DomainRid.GUESTS}'] - # Users should only have local users and domain users - users = await self.groupmap_listmem("S-1-5-32-545") - if domain_sid: - expected.append(f'{domain_sid}-513') + # Samba has special behavior if DomainRid.USERS is set for local domain + # and so we map the builtin_users account to a normal sid then make it + # a member of S-1-5-32-545 + users = [groupmap['local'][545]['sid']] - diff = await self.diff_membership(users, expected) - await self.update_payload_with_diff(payload, "S-1-5-32-545", diff, ad_state) + if (admin_group := self.middleware.call_sync('smb.config')['admin_group']): + if (found := self.middleware.call_sync('group.query', [('group', '=', admin_group)])): + admins.append(found[0]['sid']) + else: + self.logger.warning('%s: SMB admin group does not exist', admin_group) - guests = await self.groupmap_listmem("S-1-5-32-546") - expected = [ - groupmap['local_builtins'][546]['sid'], - f'{groupmap["localsid"]}-501' - ] - if domain_sid: - expected.append(f'{domain_sid}-514') + ad_state = self.middleware.call_sync('activedirectory.get_state') + if ad_state == 'HEALTHY': + try: + domain_info = self.middleware.call_sync('idmap.domain_info', + 'DS_TYPE_ACTIVEDIRECTORY') + domain_sid = domain_info['sid'] - diff = await self.diff_membership(guests, expected) - await self.update_payload_with_diff(payload, "S-1-5-32-546", diff, ad_state) + # add domain account SIDS + admins.append(f'{domain_sid}-{DomainRid.ADMINS}') + users.append(f'{domain_sid}-{DomainRid.USERS}') + guests.append(f'{domain_sid}-{DomainRid.GUESTS}') + except Exception: + self.logger.warning('Failed to retrieve idmap domain info', exc_info=True) - await self.batch_groupmap(payload) + entries.append(SMBGroupMembership(sid=SMBBuiltin.ADMINISTRATORS.sid, members=tuple(set(admins)))) + entries.append(SMBGroupMembership(sid=SMBBuiltin.USERS.sid, members=tuple(set(users)))) + entries.append(SMBGroupMembership(sid=SMBBuiltin.GUESTS.sid, members=tuple(set(guests)))) + insert_groupmap_entries(GroupmapFile.DEFAULT, entries) @private def initialize_idmap_tdb(self, low_range): 
@@ -290,29 +226,21 @@ def remove_key(tdb_handle, key, reverse): return must_reload @private - async def groupmap_list(self): + def groupmap_list(self): """ - Convert JSON groupmap output to dict to get O(1) lookups by `gid` - Separate out the groupmap output into builtins, locals, and invalid entries. Invalid entries are ones that aren't from our domain, or are mapped to gid -1. Latter occurs when group mapping is lost. In case of invalid entries, we store list of SIDS to be removed. SID is necessary and sufficient for groupmap removal. """ - rv = {"builtins": {}, "local": {}, "local_builtins": {}, "invalid": []} + rv = {"builtins": {}, "local": {}, "local_builtins": {}} - localsid = await self.middleware.call('smb.get_system_sid') - if localsid is None: - raise CallError("Unable to retrieve local system SID. Group mapping failure.") + localsid = self.middleware.call_sync('smb.local_server_sid') + legacy_entries = [] - out = await run([SMBCmd.NET.value, '--json', 'groupmap', 'list', '{"verbose": true}'], check=False) - if out.returncode != 0: - raise CallError(f'groupmap list failed with error {out.stderr.decode()}') - - gm = json.loads(out.stdout.decode()) - await self.json_check_version(gm['version']) - - for g in gm['groupmap']: + for g in query_groupmap_entries(GroupmapFile.DEFAULT, [ + ['entry_type', '=', GroupmapEntryType.GROUP_MAPPING.name] + ], {}): gid = g['gid'] key = 'invalid' if gid == -1: @@ -324,6 +252,10 @@ async def groupmap_list(self): elif g['sid'].startswith(localsid) and g['gid'] in (544, 546): key = 'local_builtins' elif g['sid'].startswith(localsid): + if int(get_domain_rid(g['sid'])) < BASE_RID_USER: + legacy_entries.append(g) + continue + key = 'local' if key == 'invalid' or rv[key].get(gid): @@ -332,168 +264,113 @@ async def groupmap_list(self): rv[key][gid] = g - rv["localsid"] = localsid + rv['localsid'] = localsid + + for entry in legacy_entries: + # keep copy of legacy groupmap entries so that we can rewrite our share_info.tdb file + gm = SMBGroupMap( + sid=entry['sid'], + gid=entry['gid'], + sid_type=lsa_sidtype.ALIAS, + name=entry['name'], + comment=entry['comment'] + ) + insert_groupmap_entries(GroupmapFile.REJECT, [gm]) + + try: + delete_groupmap_entry( + GroupmapFile.DEFAULT, + GroupmapEntryType.GROUP_MAPPING, + entry_sid=entry['sid'], + ) + except Exception: + self.logger.debug('Failed to delete legacy entry', exc_info=True) + return rv @private - async def sync_builtins(self, groupmap): - idmap_backend = await self.middleware.call("smb.getparm", "idmap config * : backend", "GLOBAL") - idmap_range = await self.middleware.call("smb.getparm", "idmap config * : range", "GLOBAL") - payload = {"ADD": [{"groupmap": []}], "MOD": [{"groupmap": []}], "DEL": [{"groupmap": []}]} - must_reload = False + def sync_builtins(self, to_add): + idmap_backend = self.middleware.call_sync("smb.getparm", "idmap config * : backend", "GLOBAL") + idmap_range = self.middleware.call_sync("smb.getparm", "idmap config * : range", "GLOBAL") if idmap_backend != "tdb": """ idmap_autorid and potentially other allocating idmap backends may be used for the default domain. We do not want to touch how these are allocated. 
""" - return must_reload + return False low_range = int(idmap_range.split("-")[0].strip()) - sid_lookup = {x["sid"]: x for x in groupmap.values()} - for b in (SMBBuiltin.ADMINISTRATORS, SMBBuiltin.USERS, SMBBuiltin.GUESTS): - sid = b.value[1] - rid = int(sid.split('-')[-1]) - gid = low_range + (rid - 544) - entry = sid_lookup.get(sid, None) - if entry and entry['gid'] == gid: - # Value is correct, nothing to do. - continue - - # If group type is incorrect, it entry must be deleted before re-adding. - elif entry and entry['gid'] != gid and entry['group_type_int'] != 4: - payload['DEL'][0]['groupmap'].append({ - 'sid': str(sid), - }) - payload['ADD'][0]['groupmap'].append({ - 'sid': str(sid), - 'gid': gid, - 'group_type_str': 'local', - 'nt_name': b.value[0][8:].capitalize() - }) - elif entry and entry['gid'] != gid: - payload['MOD'][0]['groupmap'].append({ - 'sid': str(sid), - 'gid': gid, - 'group_type_str': 'local', - 'nt_name': b.value[0][8:].capitalize() - }) - else: - payload['ADD'][0]['groupmap'].append({ - 'sid': str(sid), - 'gid': gid, - 'group_type_str': 'local', - 'nt_name': b.value[0][8:].capitalize() - }) - - await self.batch_groupmap(payload) - if (await self.middleware.call('smb.validate_groupmap_hwm', low_range)): - must_reload = True - - return must_reload + gid = low_range + b.rid - 544 + to_add.append(SMBGroupMap( + sid=b.sid, + gid=gid, + sid_type=lsa_sidtype.ALIAS, + name=b.nt_name, + comment='' + )) - @private - async def batch_groupmap(self, data): - for op in ["ADD", "MOD", "DEL"]: - if data.get(op) is not None and len(data[op]) == 0: - data.pop(op) - - payload = json.dumps(data) - out = await run([SMBCmd.NET.value, '--json', 'groupmap', 'batch', payload], check=False) - if out.returncode != 0: - raise CallError(f'Batch operation for [{data}] failed with error {out.stderr.decode()}') + return self.validate_groupmap_hwm(low_range) @private - @job(lock="groupmap_sync") - async def synchronize_group_mappings(self, job, bypass_sentinel_check=False): + @job(lock="groupmap_sync", lock_queue_size=1) + def synchronize_group_mappings(self, job, bypass_sentinel_check=False): """ This method does the following: - 1) prepares payload for a batch groupmap operation. These are added to two arrays: - "to_add" and "to_del". Missing entries are added, invalid entries are deleted. - 2) we synchronize S-1-5-32-544, S-1-5-32-545, and S-1-5-32-546 separately - 3) we add any required group mappings for the SIDs in (2) above. - 4) we flush various caches if required. + 1) ensures that group_mapping.tdb has all required groupmap entries + 2) ensures that builtin SIDs S-1-5-32-544, S-1-5-32-545, and S-1-5-32-546 + exist and are deterministically mapped to expected GIDs + 3) ensures that all expected foreign aliases for builtin SIDs above exist. + 4) flush various caches if required. """ - payload = {} - to_add = [] - to_del = [] - - if await self.middleware.call('ldap.get_state') != "DISABLED": - return + entries = [] - if not bypass_sentinel_check and not await self.middleware.call('smb.is_configured'): + if not bypass_sentinel_check and not self.middleware.call_sync('smb.is_configured'): raise CallError( "SMB server configuration is not complete. " "This may indicate system dataset setup failure." 
) - groupmap = await self.groupmap_list() - must_remove_cache = False - - groups = await self.middleware.call('group.query', [('builtin', '=', False), ('local', '=', True), ('smb', '=', True)]) - g_dict = {x["gid"]: x for x in groups} - g_dict[545] = await self.middleware.call('group.query', [('gid', '=', 545), ('local', '=', True)], {'get': True}) - - intersect = set(g_dict.keys()).intersection(set(groupmap["local"].keys())) - - set_to_add = set(g_dict.keys()) - set(groupmap["local"].keys()) - set_to_del = set(groupmap["local"].keys()) - set(g_dict.keys()) - set_to_mod = set([x for x in intersect if groupmap['local'][x]['nt_name'] != g_dict[x]['group']]) - - to_add = [{ - "dbid": g_dict[x]["id"], - "gid": g_dict[x]["gid"], - "nt_name": g_dict[x]["group"], - "group_type_str": "local" - } for x in set_to_add] - - to_mod = [{ - "gid": g_dict[x]["gid"], - "nt_name": g_dict[x]["group"], - "sid": groupmap["local"][x]["sid"], - "group_type_str": "local" - } for x in set_to_mod] - - to_del = [{ - "sid": groupmap["local"][x]["sid"] - } for x in set_to_del] - - for sid in groupmap['invalid']: - to_del.append({"sid": sid}) - - for gid in (544, 546): - if not groupmap["local_builtins"].get(gid): - builtin = SMBBuiltin.by_rid(gid) - rid = 512 + (gid - 544) - sid = f'{groupmap["localsid"]}-{rid}' - to_add.append({ - "gid": gid, - "nt_name": f"local_{builtin.name.lower()}", - "group_type_str": "local", - "sid": sid, - }) - - if to_add: - payload["ADD"] = [{"groupmap": to_add}] - - if to_mod: - payload["MOD"] = [{"groupmap": to_mod}] - - if to_del: - payload["DEL"] = [{"groupmap": to_del}] - - await self.middleware.call('smb.fixsid') - must_remove_cache = await self.sync_builtins(groupmap['builtins']) - await self.batch_groupmap(payload) - await self.sync_foreign_groups() + groupmap = self.groupmap_list() + + groups = self.middleware.call_sync('group.query', [ + ('builtin', '=', False), ('local', '=', True), ('smb', '=', True) + ]) + groups.append(self.middleware.call_sync('group.query', [('gid', '=', 545), ('local', '=', True)], {'get': True})) + gid_set = {x["gid"] for x in groups} + + for group in groups: + entries.append(SMBGroupMap( + sid=group['sid'], + gid=group['gid'], + sid_type=lsa_sidtype.ALIAS, + name=group['group'], + comment='' + )) + + for entry in groupmap['local'].values(): + # delete entries that don't map to a local account + if entry['gid'] in gid_set: + continue + + try: + delete_groupmap_entry( + GroupmapFile.DEFAULT, + GroupmapEntryType.GROUP_MAPPING, + sid=entry['sid'], + ) + except Exception: + self.logger.warning('%s: failed to remove group mapping', entry['sid'], exc_info=True) + + must_remove_cache = self.sync_builtins(entries) + insert_groupmap_entries(GroupmapFile.DEFAULT, entries) + + self.sync_foreign_groups() if must_remove_cache: - await self.middleware.call('tdb.wipe', { - 'name': f'{SMBPath.CACHE_DIR.platform()}/winbindd_cache.tdb', - 'tdb-options': {'data_type': 'STRING', 'backend': 'CUSTOM'} - }) + clear_winbind_idmap_cache() try: - await self.middleware.call('idmap.gencache.flush') + self.middleware.call_sync('idmap.gencache.flush') except Exception: self.logger.warning('Failed to flush caches after groupmap changes.', exc_info=True) diff --git a/src/middlewared/middlewared/plugins/smb_/passdb.py b/src/middlewared/middlewared/plugins/smb_/passdb.py index 68c55111bc6b0..1430b21f93f32 100644 --- a/src/middlewared/middlewared/plugins/smb_/passdb.py +++ b/src/middlewared/middlewared/plugins/smb_/passdb.py @@ -1,6 +1,8 @@ from middlewared.service import filterable, 
Service, job, private from middlewared.service_exception import CallError, MatchNotFound from middlewared.utils import run, filter_list +from middlewared.utils.sid import db_id_to_rid +from middlewared.plugins.idmap_.idmap_constants import IDType from middlewared.plugins.smb import SMBCmd, SMBPath import os @@ -92,8 +94,7 @@ async def update_passdb_user(self, user): if user['pdb'] is None: cmd = [SMBCmd.PDBEDIT.value, '-d', '0', '-a', username] - - next_rid = await self.middleware.call('smb.get_next_rid', 'USER', user.get('id')) + next_rid = db_id_to_rid(IDType.USER, user['id']) cmd.extend(['-U', str(next_rid)]) cmd.append('-t') diff --git a/src/middlewared/middlewared/plugins/smb_/sid.py b/src/middlewared/middlewared/plugins/smb_/sid.py index 0cb16fe44ef27..a4eee8f1f097d 100644 --- a/src/middlewared/middlewared/plugins/smb_/sid.py +++ b/src/middlewared/middlewared/plugins/smb_/sid.py @@ -1,10 +1,10 @@ -from middlewared.service import Service, private -from middlewared.utils import run -from middlewared.plugins.smb import SMBCmd - -import re +import subprocess -RE_SID = re.compile(r"S-\d-\d+-(\d+-){1,14}\d+$") +from middlewared.service import Service, private +from middlewared.service_exception import CallError +from middlewared.utils.functools_ import cache +from middlewared.utils.sid import random_sid +from .constants import SMBCmd class SMBService(Service): @@ -13,70 +13,24 @@ class Config: service = 'cifs' service_verb = 'restart' + @cache @private - async def get_system_sid(self): - getSID = await run([SMBCmd.NET.value, "-d", "0", "getlocalsid"], check=False) - if getSID.returncode != 0: - self.logger.debug('Failed to retrieve local system SID: %s', - getSID.stderr.decode()) - return None - - m = RE_SID.search(getSID.stdout.decode().strip()) - if m: - return m.group(0) - - self.logger.debug("getlocalsid returned invalid SID: %s", - getSID.stdout.decode().strip()) - return None - - @private - async def set_sid(self, db_sid): - system_SID = await self.get_system_sid() + def local_server_sid(self): + if (db_sid := self.middleware.call_sync('smb.config')['cifs_SID']): + return db_sid - if system_SID == db_sid: - return True - - if db_sid: - if not await self.set_system_sid(db_sid): - self.logger.debug('Unable to set set SID to %s', db_sid) - return False - else: - if not system_SID: - self.logger.warning('Unable to determine system and database SIDs') - return False - - await self.set_database_sid(system_SID) - return True - - @private - async def set_database_sid(self, SID): - await self.middleware.call('datastore.update', 'services.cifs', 1, {'cifs_SID': SID}) + new_sid = random_sid() + self.middleware.call_sync('datastore.update', 'services.cifs', 1, {'cifs_SID': new_sid}) + return new_sid @private - async def set_system_sid(self, SID): - if not SID: - return False + def set_system_sid(self): + server_sid = self.local_server_sid() - setSID = await run([SMBCmd.NET.value, "-d", "0", "setlocalsid", SID], check=False) - if setSID.returncode != 0: - self.logger.debug("setlocalsid failed with error: %s", - setSID.stderr.decode()) - return False - - return True - - @private - async def fixsid(self, groupmap=None): - """ - Samba generates a new domain sid when its netbios name changes or if samba's secrets.tdb - has been deleted. passdb.tdb will automatically reflect the new mappings, but the groupmap - database is not automatically updated in these circumstances. This check is performed when - synchronizing group mapping database. 
In case there entries that no longer match our local - system sid, group_mapping.tdb will be removed and re-generated. - """ - db_SID = (await self.middleware.call('smb.config'))['cifs_SID'] - system_sid = await self.get_system_sid() + setsid = subprocess.run([ + SMBCmd.NET.value, '-d', '0', + 'setlocalsid', server_sid, + ], capture_output=True, check=False) - if db_SID != system_sid: - self.logger.warning(f"Domain SID in group_mapping.tdb ({system_sid}) is not SID in nas config ({db_SID}). Updating db") - await self.set_database_sid(system_sid) + if setsid.returncode != 0: + raise CallError(f'setlocalsid failed: {setsid.stderr.decode()}') diff --git a/src/middlewared/middlewared/plugins/smb_/util_groupmap.py b/src/middlewared/middlewared/plugins/smb_/util_groupmap.py new file mode 100644 index 0000000000000..eb1d94b254849 --- /dev/null +++ b/src/middlewared/middlewared/plugins/smb_/util_groupmap.py @@ -0,0 +1,194 @@ +import enum + +from base64 import b64decode, b64encode +from collections.abc import Iterable +from dataclasses import asdict, dataclass +from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH +from middlewared.utils import filter_list +from middlewared.utils.sid import ( + lsa_sidtype +) +from middlewared.utils.tdb import ( + get_tdb_handle, + TDBBatchAction, + TDBBatchOperation, + TDBDataType, + TDBOptions, + TDBPathType, +) +from socket import htonl, ntohl + +UNIX_GROUP_KEY_PREFIX = 'UNIXGROUP/' +MEMBEROF_PREFIX = 'MEMBEROF/' + +GROUP_MAPPING_TDB_OPTIONS = TDBOptions(TDBPathType.CUSTOM, TDBDataType.BYTES) + + +class GroupmapEntryType(enum.Enum): + GROUP_MAPPING = enum.auto() # conventional group mapping entry + MEMBERSHIP = enum.auto() # foreign alias member + + +class GroupmapFile(enum.Enum): + DEFAULT = f'{SYSDATASET_PATH}/samba4/group_mapping.tdb' + REJECT = f'{SYSDATASET_PATH}/samba4/group_mapping_rejects.tdb' + + +@dataclass(frozen=True) +class SMBGroupMap: + sid: str + gid: int + sid_type: lsa_sidtype + name: str + comment: str + + +@dataclass(frozen=True) +class SMBGroupMembership: + sid: str + members: tuple[str] + + +def _parse_unixgroup(tdb_key: str, tdb_val: str) -> SMBGroupMap: + """ parsing function to convert TDB key/value pair into SMBGroupMap """ + sid = tdb_key[len(UNIX_GROUP_KEY_PREFIX):] + data = b64decode(tdb_val) + + # unix groups are written into tdb file via tdb_pack + gid = htonl(int.from_bytes(data[0:4])) + sid_type = lsa_sidtype(htonl(int.from_bytes(data[4:8]))) + + # remaining bytes are two null-terminated strings + bname, bcomment = data[8:-1].split(b'\x00') + return SMBGroupMap(sid, gid, sid_type, bname.decode(), bcomment.decode()) + + +def _parse_memberof(tdb_key: str, tdb_val: str) -> SMBGroupMembership: + """ parsing function to convert TDB key/value pair into SMBGroupMembership """ + sid = tdb_key[len(MEMBEROF_PREFIX):] + data = b64decode(tdb_val) + + members = tuple(data[:-1].decode().split()) + return SMBGroupMembership(sid, members) + + +def _groupmap_to_tdb_key_val(group_map: SMBGroupMap) -> tuple[str, str]: + tdb_key = f'{UNIX_GROUP_KEY_PREFIX}{group_map.sid}' + gid = ntohl(group_map.gid).to_bytes(4) + sid_type = ntohl(group_map.sid_type).to_bytes(4) + name = group_map.name.encode() + comment = group_map.comment.encode() + + data = gid + sid_type + name + b'\x00' + comment + b'\x00' + return (tdb_key, b64encode(data)) + + +def _groupmem_to_tdb_key_val(group_mem: SMBGroupMembership) -> tuple[str, str]: + tdb_key = f'{MEMBEROF_PREFIX}{group_mem.sid}' + data = ' '.join(set(group_mem.members)).encode() + b'\x00' + return 
(tdb_key, b64encode(data)) + + +def groupmap_entries( + groupmap_file: GroupmapFile, + as_dict: bool = False +) -> Iterable[SMBGroupMap, SMBGroupMembership, dict]: + """ iterate the specified group_mapping.tdb file + + Params: + as_dict - return as dictionary + + Returns: + SMBGroupMap or SMBGroupMembership + + Raises: + RuntimeError + FileNotFoundError + """ + if not isinstance(groupmap_file, GroupmapFile): + raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.') + + with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl: + for entry in hdl.entries(): + if entry['key'].startswith(UNIX_GROUP_KEY_PREFIX): + parser_fn = _parse_unixgroup + entry_type = GroupmapEntryType.GROUP_MAPPING.name + elif entry['key'].startswith(MEMBEROF_PREFIX): + parser_fn = _parse_memberof + entry_type = GroupmapEntryType.MEMBERSHIP.name + else: + continue + + if as_dict: + yield {'entry_type': entry_type} | asdict(parser_fn(entry['key'], entry['value'])) + else: + yield parser_fn(entry['key'], entry['value']) + + +def query_groupmap_entries(groupmap_file: GroupmapFile, filters: list, options: dict) -> list[dict]: + try: + return filter_list(groupmap_entries(groupmap_file, as_dict=True), filters, options) + except FileNotFoundError: + return [] + + +def insert_groupmap_entries( + groupmap_file: GroupmapFile, + entries: list[SMBGroupMap | SMBGroupMembership] +) -> None: + """ Insert multiple groupmap entries under a transaction lock """ + + batch_ops = [] + + for entry in entries: + if isinstance(entry, SMBGroupMap): + tdb_key, tdb_val = _groupmap_to_tdb_key_val(entry) + elif isinstance(entry, SMBGroupMembership): + tdb_key, tdb_val = _groupmem_to_tdb_key_val(entry) + else: + raise TypeError(f'{type(entry)}: unexpected group_mapping.tdb entry type') + + batch_ops.append(TDBBatchOperation(action=TDBBatchAction.SET, key=tdb_key, value=tdb_val)) + + if len(batch_ops) == 0: + # nothing to do, avoid taking lock + return + + with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl: + hdl.batch_op(batch_ops) + + +def delete_groupmap_entry( + groupmap_file: GroupmapFile, + entry_type: GroupmapEntryType, + entry_sid: str +): + if not isinstance(groupmap_file, GroupmapFile): + raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.') + + if not isinstance(entry_type, GroupmapEntryType): + raise TypeError(f'{type(entry_type)}: expected GroumapEntryType.') + + match entry_type: + case GroupmapEntryType.GROUP_MAPPING: + tdb_key = f'{UNIX_GROUP_KEY_PREFIX}{entry_sid}' + case GroupmapEntryType.MEMBERSHIP: + tdb_key = f'{MEMBEROF_PREFIX}{entry_sid}' + case _: + raise TypeError(f'{entry_type}: unexpected GroumapEntryType.') + + with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl: + hdl.delete(tdb_key) + + +def list_foreign_group_memberships( + groupmap_file: GroupmapFile, + entry_sid: str +) -> SMBGroupMembership: + if not isinstance(groupmap_file, GroupmapFile): + raise TypeError(f'{type(groupmap_file)}: expected GroupmapFile type.') + + with get_tdb_handle(groupmap_file.value, GROUP_MAPPING_TDB_OPTIONS) as hdl: + tdb_key = f'{MEMBEROF_PREFIX}{entry_sid}' + tdb_val = hdl.fetch(tdb_key) + return _parse_memberof(tdb_key, tdb_val) diff --git a/src/middlewared/middlewared/plugins/smb_/utils.py b/src/middlewared/middlewared/plugins/smb_/utils.py index a391455767570..3db23141745e6 100644 --- a/src/middlewared/middlewared/plugins/smb_/utils.py +++ b/src/middlewared/middlewared/plugins/smb_/utils.py @@ -1,4 +1,14 @@ from .constants import 
SMBSharePreset +from secrets import randbits + + +def random_sid(): + """ See MS-DTYP 2.4.2 SID """ + subauth_1 = randbits(32) + subauth_2 = randbits(32) + subauth_3 = randbits(32) + + return f'S-1-5-21-{subauth_1}-{subauth_2}-{subauth_3}' def smb_strip_comments(auxparam_in): diff --git a/src/middlewared/middlewared/utils/sid.py b/src/middlewared/middlewared/utils/sid.py new file mode 100644 index 0000000000000..9ddd3026125ec --- /dev/null +++ b/src/middlewared/middlewared/utils/sid.py @@ -0,0 +1,142 @@ +import enum + +from secrets import randbits +from middlewared.plugins.idmap_.idmap_constants import BASE_SYNTHETIC_DATASTORE_ID, IDType + + +DOM_SID_PREFIX = 'S-1-5-21-' +DOM_SID_SUBAUTHS = 3 +MAX_VALUE_SUBAUTH = 2 ** 32 +BASE_RID_USER = 20000 +BASE_RID_GROUP = 200000 + + +class DomainRid(enum.IntEnum): + """ Defined in MS-DTYP Section 2.4.2.4 + This is subsest of well-known RID values defined in above document + focused on ones that are of particular significance to permissions and + SMB server behavior + """ + ADMINISTRATOR = 500 # local administrator account + GUEST = 501 # guest account + ADMINS = 512 # domain admins account (local or joined) + USERS = 513 + GUESTS = 514 + COMPUTERS = 515 + + +class WellKnownSid(enum.Enum): + """ Defined in MS-DTYP Section 2.4.2.4 """ + WORLD = 'S-1-1-0' + CREATOR_OWNER = 'S-1-3-0' + CREATOR_GROUP = 'S-1-3-1' + OWNER_RIGHTS = 'S-1-3-4' + AUTHENTICATED_USERS = 'S-1-5-11' + SYSTEM = 'S-1-5-18' + NT_AUTHORITY = 'S-1-5-19' + BUILTIN_ADMINISTRATORS = 'S-1-5-32-544' + BUILTIN_USERS = 'S-1-5-32-545' + BUILTIN_GUESTS = 'S-1-5-32-546' + + @property + def sid(self): + return self.value + + +class lsa_sidtype(enum.IntEnum): + """ librpc/idl/lsa.idl + used for passdb and group mapping databases + """ + USE_NONE = 0 # NOTUSED + USER = 1 # user + DOM_GRP = 2 # domain group + DOMAIN = 3 + ALIAS = 4 # local group + WKN_GRP = 5 # well-known group + DELETED = 6 # deleted account + INVALID = 7 # invalid account + UNKNOWN = 8 + COMPUTER = 9 + LABEL = 10 # mandatory label + + +def random_sid() -> str: + """ See MS-DTYP 2.4.2 SID """ + subauth_1 = randbits(32) + subauth_2 = randbits(32) + subauth_3 = randbits(32) + + return f'S-1-5-21-{subauth_1}-{subauth_2}-{subauth_3}' + + +def sid_is_valid(sid: str) -> bool: + """ + This is validation function should be used with some caution + as it only applies to SID values we reasonably expect to be used + in SMB ACLs or for local user / group accounts + """ + if not isinstance(sid, str): + return False + + # Whitelist some well-known SIDs user may have + if sid in ( + WellKnownSid.WORLD.sid, + WellKnownSid.OWNER_RIGHTS.sid, + WellKnownSid.BUILTIN_ADMINISTRATORS.sid, + WellKnownSid.BUILTIN_USERS.sid, + WellKnownSid.BUILTIN_GUESTS.sid, + ): + return True + + if not sid.startswith(DOM_SID_PREFIX): + # not a domain sid + return False + + subauths = sid[len(DOM_SID_PREFIX):].split('-') + + # SID may have a RID component appended + if len(subauths) < DOM_SID_SUBAUTHS or len(subauths) > DOM_SID_SUBAUTHS + 1: + return False + + for subauth in subauths: + if not subauths.isdigit(): + return False + + subauth_val = int(subauth) + if subauth_val < 1 or subauth_val > MAX_VALUE_SUBAUTH: + return False + + return True + + +def get_domain_rid(sid: str) -> str: + """ get rid component of the specified SID """ + if not sid.startswith(DOM_SID_PREFIX): + raise ValueError(f'{sid}: not a domain SID') + + subauths = sid[len(DOM_SID_PREFIX):].split('-') + if len(subauths) == DOM_SID_SUBAUTHS: + raise ValueError(f'{sid}: does not contain a RID component') 
+
+    return subauths[-1]
+
+
+def db_id_to_rid(id_type: IDType, db_id: int) -> int:
+    """
+    Simple algorithm to convert a datastore ID into RID value. Has been
+    in use since TrueNAS 12. May not be changed because it will break
+    SMB share ACLs
+    """
+    if not isinstance(db_id, int):
+        raise ValueError(f'{db_id}: Not an int')
+
+    if db_id >= BASE_SYNTHETIC_DATASTORE_ID:
+        raise ValueError('Not valid for users and groups from directory services')
+
+    match id_type:
+        case IDType.USER:
+            return db_id + BASE_RID_USER
+        case IDType.GROUP:
+            return db_id + BASE_RID_GROUP
+        case _:
+            raise ValueError(f'{id_type}: unknown ID type')
diff --git a/src/middlewared/middlewared/utils/tdb.py b/src/middlewared/middlewared/utils/tdb.py
index 7684a81030926..c04cfec2cb830 100644
--- a/src/middlewared/middlewared/utils/tdb.py
+++ b/src/middlewared/middlewared/utils/tdb.py
@@ -10,14 +10,14 @@
 from dataclasses import dataclass
 from middlewared.plugins.system_dataset.utils import SYSDATASET_PATH
 from middlewared.service_exception import MatchNotFound
-from threading import Lock
+from threading import RLock
 
 FD_CLOSED = -1
 
 # Robust mutex support was added to libtdb after py-tdb was written and flags
 # weren't updated. See lib/tdb/include/tdb.h
 MUTEX_LOCKING = 4096
-TDB_LOCKS = defaultdict(Lock)
+TDB_LOCKS = defaultdict(RLock)
 
 TDBOptions = namedtuple('TdbFileOptions', ['backend', 'data_type'])
 TDB_HANDLES = {}
@@ -292,6 +292,12 @@ def __init__(
                 tdb_flags = tdb.DEFAULT
                 open_flags = os.O_RDWR
                 open_mode = 0o600
+            case 'group_mapping.tdb' | 'group_mapping_rejects.tdb':
+                tdb_flags = tdb.DEFAULT
+                open_flags = os.O_RDWR
+                self.keys_null_terminated = True
+                open_flags = os.O_CREAT | os.O_RDWR
+                open_mode = 0o600
             case _:
                 tdb_flags = tdb.DEFAULT
                 # Typically tdb files will have NULL-terminated keys
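For context on the on-disk format handled by util_groupmap.py: each UNIXGROUP/<sid> value is two 32-bit integers (gid, lsa_sidtype) followed by two NUL-terminated strings (NT name, comment). A standalone round-trip sketch, assuming the little-endian layout that the htonl/ntohl handling in _parse_unixgroup and _groupmap_to_tdb_key_val effectively produces on little-endian hosts:

    import struct

    def pack_groupmap_value(gid: int, sid_type: int, name: str, comment: str = '') -> bytes:
        # gid and sid_type as little-endian uint32, then NUL-terminated name and comment
        return struct.pack('<II', gid, sid_type) + name.encode() + b'\x00' + comment.encode() + b'\x00'

    def unpack_groupmap_value(data: bytes) -> tuple[int, int, str, str]:
        gid, sid_type = struct.unpack('<II', data[:8])
        name, comment = data[8:-1].split(b'\x00', 1)
        return gid, sid_type, name.decode(), comment.decode()

    # Round trip for an ALIAS entry (lsa_sidtype.ALIAS == 4)
    packed = pack_groupmap_value(3001, 4, 'smbgroup')
    assert unpack_groupmap_value(packed) == (3001, 4, 'smbgroup', '')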