diff --git a/tests/test_repository_lib.py b/tests/test_repository_lib.py
index 96dcb0e0e0..c9269f4ddd 100755
--- a/tests/test_repository_lib.py
+++ b/tests/test_repository_lib.py
@@ -458,6 +458,64 @@ def test_generate_targets_metadata(self):
                           False, use_existing_fileinfo=True)


+  def test_build_rsa_acc(self):
+    temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
+    storage_backend = securesystemslib.storage.FilesystemBackend()
+    version = 1
+
+    # Test an RSA accumulator with a single node to verify the output.
+    test_nodes = {}
+    test_nodes['file1'] = tuf.formats.make_metadata_fileinfo(5, None, None)
+
+    root_1, leaves = repo_lib._build_rsa_acc(test_nodes)
+    repo_lib._write_rsa_proofs(root_1, leaves, storage_backend,
+        temporary_directory, version)
+
+    # Ensure that the proof files are written to the directory.
+    file_path = os.path.join(temporary_directory, 'file1-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, '1.file1-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    # Test an RSA accumulator over several nodes.
+    test_nodes = {}
+    test_nodes['targets'] = tuf.formats.make_metadata_fileinfo(1, None, None)
+    test_nodes['role1'] = tuf.formats.make_metadata_fileinfo(1, None, None)
+    test_nodes['role2'] = tuf.formats.make_metadata_fileinfo(1, None, None)
+
+    root, leaves = repo_lib._build_rsa_acc(test_nodes)
+    repo_lib._write_rsa_proofs(root, leaves, storage_backend,
+        temporary_directory, version)
+
+    # Ensure that the proof files are written to the directory.
+    file_path = os.path.join(temporary_directory, 'targets-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, '1.targets-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, 'role1-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, '1.role1-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, 'role2-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    file_path = os.path.join(temporary_directory, '1.role2-snapshot.json')
+    self.assertTrue(os.path.exists(file_path))
+
+    # TODO: check against the correct root value.  Until the expected
+    # accumulator values are pinned, only sanity-check the roots.
+    self.assertTrue(root_1 > 1)
+    self.assertTrue(root > 1)
+
+
+
  def _setup_generate_snapshot_metadata_test(self):
    # Test normal case.
    temporary_directory = tempfile.mkdtemp(dir=self.temporary_directory)
@@ -499,7 +557,7 @@ def test_generate_snapshot_metadata(self):
     metadata_directory, version, expiration_date, storage_backend = \
         self._setup_generate_snapshot_metadata_test()

-    snapshot_metadata = \
+    snapshot_metadata, _ = \
         repo_lib.generate_snapshot_metadata(metadata_directory, version,
             expiration_date, storage_backend,
@@ -527,7 +585,7 @@ def test_generate_snapshot_metadata_with_length(self):
     metadata_directory, version, expiration_date, storage_backend = \
         self._setup_generate_snapshot_metadata_test()

-    snapshot_metadata = \
+    snapshot_metadata, _ = \
         repo_lib.generate_snapshot_metadata(metadata_directory, version,
             expiration_date, storage_backend,
@@ -558,7 +616,7 @@ def test_generate_snapshot_metadata_with_hashes(self):
     metadata_directory, version, expiration_date, storage_backend = \
         self._setup_generate_snapshot_metadata_test()

-    snapshot_metadata = \
+    snapshot_metadata, _ = \
         repo_lib.generate_snapshot_metadata(metadata_directory, version,
             expiration_date, storage_backend,
@@ -589,7 +647,7 @@ def test_generate_snapshot_metadata_with_hashes_and_length(self):
     metadata_directory, version, expiration_date, storage_backend = \
         self._setup_generate_snapshot_metadata_test()

-    snapshot_metadata = \
+    snapshot_metadata, _ = \
         repo_lib.generate_snapshot_metadata(metadata_directory, version,
             expiration_date, storage_backend,
diff --git a/tests/test_repository_tool.py b/tests/test_repository_tool.py
index be0333c351..519b7bb207 100755
--- a/tests/test_repository_tool.py
+++ b/tests/test_repository_tool.py
@@ -247,6 +247,21 @@ def test_writeall(self):
     # Verify that status() does not raise an exception.
     repository.status()

+    # Test writeall() when generating a snapshot RSA accumulator.
+    repository.mark_dirty(['role1', 'targets', 'root', 'snapshot', 'timestamp'])
+    repository.writeall(rsa_acc=True)
+
+    # Were the RSA proof snapshots written?
+    targets_snapshot_filepath = os.path.join(metadata_directory,
+        'targets-snapshot.json')
+    targets_snapshot = securesystemslib.util.load_json_file(
+        targets_snapshot_filepath)
+    tuf.formats.SNAPSHOT_RSA_ACC_SCHEMA.check_match(targets_snapshot)
+
+    # Does timestamp carry the accumulator root?
+    timestamp_filepath = os.path.join(metadata_directory, 'timestamp.json')
+    timestamp = securesystemslib.util.load_json_file(timestamp_filepath)
+    self.assertIn('rsa_acc', timestamp['signed'])
+
     # Verify that status() does not raise
     # 'tuf.exceptions.InsufficientKeysError' if a top-level role
     # does not contain a threshold of keys.
@@ -488,10 +503,13 @@ def test_get_filepaths_in_directory(self):

     # Construct list of file paths expected, determining absolute paths.
     expected_files = []
     for filepath in ['1.root.json', 'root.json', 'targets.json',
-        'snapshot.json', 'timestamp.json', 'role1.json', 'role2.json']:
+        'snapshot.json', 'timestamp.json', 'role1.json', 'role2.json',
+        'targets-snapshot.json', 'timestamp-rsa.json',
+        'role1-snapshot.json', 'role2-snapshot.json']:
       expected_files.append(os.path.abspath(os.path.join(
           'repository_data', 'repository', 'metadata', filepath)))

     self.assertEqual(sorted(expected_files), sorted(metadata_files))

diff --git a/tests/test_updater.py b/tests/test_updater.py
index 0c28e6ca5f..2319c69901 100755
--- a/tests/test_updater.py
+++ b/tests/test_updater.py
@@ -1771,6 +1771,35 @@ def test_13__targets_of_role(self):



+  def test_snapshot_rsa_acc(self):
+    # Replace timestamp with an RSA accumulator timestamp and create the
+    # updater.
+    rsa_acc_timestamp = os.path.join(self.repository_directory, 'metadata',
+        'timestamp-rsa.json')
+    timestamp = os.path.join(self.repository_directory, 'metadata',
+        'timestamp.json')
+
+    shutil.move(rsa_acc_timestamp, timestamp)
+
+    repository_updater = updater.Updater(self.repository_name,
+        self.repository_mirrors)
+    repository_updater.refresh()
+
+    # Verify the RSA accumulator proof for each snapshotted role.
+    snapshot_info = repository_updater.verify_rsa_acc_proof('targets')
+    self.assertEqual(snapshot_info['version'], 1)
+
+    snapshot_info = repository_updater.verify_rsa_acc_proof('role1')
+    self.assertEqual(snapshot_info['version'], 1)
+
+    # Verify that an RSA accumulator proof for an invalid role is rejected.
+    self.assertRaises(tuf.exceptions.NoWorkingMirrorError,
+        repository_updater.verify_rsa_acc_proof, 'foo')
+
+    # Test get_one_valid_targetinfo with a snapshot RSA accumulator.
+    repository_updater.get_one_valid_targetinfo('file1.txt')
+
+
+
 class TestMultiRepoUpdater(unittest_toolbox.Modified_TestCase):

   def setUp(self):
diff --git a/tuf/client/updater.py b/tuf/client/updater.py
index ffb38dcb30..e11ad8318c 100755
--- a/tuf/client/updater.py
+++ b/tuf/client/updater.py
@@ -1087,8 +1087,12 @@ def refresh(self, unsafely_update_root_if_necessary=True):
     # require strict checks on its required length.
     self._update_metadata('timestamp', DEFAULT_TIMESTAMP_UPPERLENGTH)

-    self._update_metadata_if_changed('snapshot',
-        referenced_metadata='timestamp')
+    if 'rsa_acc' not in self.metadata['current']['timestamp']:
+      # If an RSA accumulator is defined, do not update snapshot metadata.
+      # Instead, the relevant proof files will be downloaded later, when a
+      # target is downloaded.
+      self._update_metadata_if_changed('snapshot',
+          referenced_metadata='timestamp')

     self._update_metadata_if_changed('targets')
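For context while reviewing the refresh() change: when the repository was written with rsa_acc=True, the signed portion of timestamp.json carries the accumulator as a decimal string next to the usual snapshot fileinfo. A minimal sketch of that shape; the field values here are made up, not taken from a real repository:

```python
# Illustrative 'signed' portion of a timestamp produced with rsa_acc=True.
# The 'rsa_acc' value is str(root) from repository_lib._build_rsa_acc();
# the digits shown are placeholders (a real value is roughly 617 digits).
timestamp_signed = {
    "_type": "timestamp",
    "spec_version": "1.0.0",
    "version": 1,
    "expires": "2030-01-01T00:00:00Z",
    "meta": {
        "snapshot.json": {"version": 1}
    },
    "rsa_acc": "123456789"
}

# refresh() branches on the key's presence, exactly as in the hunk above.
if 'rsa_acc' in timestamp_signed:
    print('snapshot is distributed as per-role RSA accumulator proofs')
```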
@@ -1616,6 +1620,206 @@ def _get_metadata_file(self, metadata_role, remote_filename,



+  def signable_verification(self, metadata_role, file_object, expected_version):
+    # Verify the signable object in 'file_object': check its specification
+    # version, its version number, and its signatures.
+    metadata_signable = \
+        securesystemslib.util.load_json_string(file_object.read().decode('utf-8'))
+
+    # Determine if the specification version number is supported.  It is
+    # assumed that "spec_version" is in (major.minor.fix) format, (for
+    # example: "1.4.3") and that releases with the same major version
+    # number maintain backwards compatibility.  Consequently, if the major
+    # version number of new metadata equals our expected major version
+    # number, the new metadata is safe to parse.
+    try:
+      metadata_spec_version = metadata_signable['signed']['spec_version']
+      metadata_spec_version_split = metadata_spec_version.split('.')
+      metadata_spec_major_version = int(metadata_spec_version_split[0])
+      metadata_spec_minor_version = int(metadata_spec_version_split[1])
+
+      code_spec_version_split = tuf.SPECIFICATION_VERSION.split('.')
+      code_spec_major_version = int(code_spec_version_split[0])
+      code_spec_minor_version = int(code_spec_version_split[1])
+
+      if metadata_spec_major_version != code_spec_major_version:
+        raise tuf.exceptions.UnsupportedSpecificationError(
+            'Downloaded metadata that specifies an unsupported '
+            'spec_version.  This code supports major version number: ' +
+            repr(code_spec_major_version) + '; however, the obtained '
+            'metadata lists version number: ' + str(metadata_spec_version))
+
+      # Report to the user if the minor versions do not match, and continue
+      # with the update.
+      if metadata_spec_minor_version != code_spec_minor_version:
+        logger.info("Downloaded metadata that specifies a different minor " +
+            "spec_version.  This code has version " +
+            str(tuf.SPECIFICATION_VERSION) +
+            " and the metadata lists version number " +
+            str(metadata_spec_version) +
+            ".  The update will continue as the major versions match.")
+
+    except (ValueError, TypeError) as error:
+      raise exceptions.FormatError('Improperly formatted spec_version, which'
+          ' must be in major.minor.fix format') from error
+
+    # If the version number is unspecified, ensure that the version number
+    # downloaded is greater than the currently trusted version number for
+    # 'metadata_role'.
+    version_downloaded = metadata_signable['signed']['version']
+
+    if expected_version is not None:
+      # Verify that the downloaded version matches the version expected by
+      # the caller.
+      if version_downloaded != expected_version:
+        raise tuf.exceptions.BadVersionNumberError('Downloaded'
+            ' version number: ' + repr(version_downloaded) + '.  Version'
+            ' number MUST be: ' + repr(expected_version))
+
+    # The caller does not know which version to download.  Verify that the
+    # downloaded version is at least greater than the one locally available.
+    else:
+      # Verify that the version number of the locally stored
+      # 'timestamp.json', if available, is less than what was downloaded.
+      # Otherwise, accept the new timestamp with version number
+      # 'version_downloaded'.
+      try:
+        current_version = \
+            self.metadata['current'][metadata_role]['version']
+
+        if version_downloaded < current_version:
+          raise tuf.exceptions.ReplayedMetadataError(metadata_role,
+              version_downloaded, current_version)
+
+      except KeyError:
+        logger.info(metadata_role + ' not available locally.')
+
+    self._verify_metadata_file(file_object, metadata_role)
+
+
+
+
+
+  def _update_rsa_acc_metadata(self, proof_filename, upperbound_filelength,
+      version=None):
+    """
+    <Purpose>
+      Non-public method that downloads, verifies, and 'installs' the proof
+      metadata belonging to 'proof_filename'.  Calling this method implies
+      that the 'proof_filename' on the repository is newer than the client's,
+      and thus needs to be re-downloaded.  The current and previous metadata
+      stores are updated if the newly downloaded metadata is successfully
+      downloaded and verified.  This method also assumes that the store of
+      top-level metadata is the latest and exists.
+
+    <Arguments>
+      proof_filename:
+        The name of the metadata.  This is an RSA accumulator proof file and
+        should not end in '.json'.
+        Examples: 'role1-snapshot', 'targets-snapshot'
+
+      upperbound_filelength:
+        The expected length, or upper bound, of the metadata file to be
+        downloaded.
+
+      version:
+        The expected and required version number of the 'proof_filename'
+        file downloaded.  'version' is an integer.
+
+    <Exceptions>
+      tuf.exceptions.NoWorkingMirrorError:
+        The metadata cannot be updated.  This is not specific to a single
+        failure but rather indicates that all possible ways to update the
+        metadata have been tried and failed.
+
+    <Side Effects>
+      The metadata file belonging to 'proof_filename' is downloaded from a
+      repository mirror.  If the metadata is valid, it is stored in the
+      metadata store.
+
+    <Returns>
+      None.
+    """
+
+    # Construct the metadata filename as expected by the download/mirror
+    # modules.
+    metadata_filename = proof_filename + '.json'
+
+    # Attempt a file download from each mirror until the file is downloaded
+    # and verified.  If the signature of the downloaded file is valid,
+    # proceed, otherwise log a warning and try the next mirror.
+    # 'metadata_file_object' is the file-like object returned by
+    # 'download.py'.  'metadata_signable' is the object extracted from
+    # 'metadata_file_object'.  Metadata saved to files are regarded as
+    # 'signable' objects, conformant to 'tuf.formats.SIGNABLE_SCHEMA'.
+    #
+    # Some metadata (presently timestamp) will be downloaded "unsafely", in
+    # the sense that we can only estimate its true length and know nothing
+    # about its version.  This is because not all metadata will have other
+    # metadata for it; otherwise we will have an infinite regress of metadata
+    # signing for each other.  In this case, we will download the metadata up
+    # to the best length we can get for it, not request a specific version,
+    # but perform the rest of the checks (e.g., signature verification).
+    remote_filename = metadata_filename
+    filename_version = ''
+
+    if self.consistent_snapshot and version:
+      filename_version = version
+      dirname, basename = os.path.split(remote_filename)
+      remote_filename = os.path.join(
+          dirname, str(filename_version) + '.' + basename)
+
+    verification_fn = None
+
+    metadata_file_object = \
+        self._get_metadata_file(proof_filename, remote_filename,
+            upperbound_filelength, version, verification_fn)
+
+    # The metadata has been verified.  Move the metadata file into place.
+    # First, move the 'current' metadata file to the 'previous' directory
+    # if it exists.
+    current_filepath = os.path.join(self.metadata_directory['current'],
+        metadata_filename)
+    current_filepath = os.path.abspath(current_filepath)
+    securesystemslib.util.ensure_parent_dir(current_filepath)
+
+    previous_filepath = os.path.join(self.metadata_directory['previous'],
+        metadata_filename)
+    previous_filepath = os.path.abspath(previous_filepath)
+
+    if os.path.exists(current_filepath):
+      # Previous metadata might not exist, say when delegations are added.
+      securesystemslib.util.ensure_parent_dir(previous_filepath)
+      shutil.move(current_filepath, previous_filepath)
+
+    # Next, move the verified updated metadata file to the 'current'
+    # directory.
+    metadata_file_object.seek(0)
+    updated_metadata_object = \
+        securesystemslib.util.load_json_string(
+        metadata_file_object.read().decode('utf-8'))
+
+    securesystemslib.util.persist_temp_file(metadata_file_object,
+        current_filepath)
+
+    # Extract the metadata object so we can store it to the metadata store.
+    # 'current_metadata_object' is set to 'None' if there is no object
+    # stored for 'proof_filename'.
+    current_metadata_object = self.metadata['current'].get(proof_filename)
+
+    # Finally, update the metadata and fileinfo stores.
+    logger.debug('Updated ' + repr(current_filepath) + '.')
+    self.metadata['previous'][proof_filename] = current_metadata_object
+    self.metadata['current'][proof_filename] = updated_metadata_object
+
+
+
+
   def _update_metadata(self, metadata_role, upperbound_filelength, version=None):
     """
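The check at the heart of verify_rsa_acc_proof() below is the standard RSA accumulator membership test: the accumulator is A = g^(p_1 * ... * p_n) mod N, the proof for the element with prime p_i is the accumulator with p_i removed from the exponent, and verification raises the proof back to p_i and compares against A. A toy, self-contained illustration (not the TUF code) with made-up small numbers; the real code uses the RSA-2048 modulus and primes from hash_to_prime():

```python
# Stand-in parameters, illustrative only.  Any modulus works for
# demonstrating the algebraic identity; g = 3 matches repository_lib.
N = 2**61 - 1
g = 3

# One prime per snapshotted role (normally derived from hashed contents).
primes = [101, 103, 107]
acc_exp = 1
for p in primes:
    acc_exp *= p
accumulator = pow(g, acc_exp, N)             # the value stored in timestamp

# Proof for the role whose prime is primes[0]: the accumulator with that
# prime divided out of the exponent (exact integer division).
proof = pow(g, acc_exp // primes[0], N)

# Client-side check: raising the proof to the role's prime must reproduce
# the accumulator, since (acc_exp // p) * p == acc_exp.
assert pow(proof, primes[0], N) == accumulator
print('membership proof verified')
```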
@@ -1732,6 +1936,87 @@ def _update_metadata(self, metadata_role, upperbound_filelength, version=None):



+  def verify_rsa_acc_proof(self, metadata_role, version=None, rsa_acc=None):
+    """
+    <Purpose>
+      Download the RSA accumulator proof associated with 'metadata_role' and
+      verify it against the accumulator value in timestamp.
+
+    <Arguments>
+      metadata_role:
+        The name of the metadata role.  This should not include a file
+        extension.
+
+      version:
+        The expected version number of the proof file, if known.
+
+      rsa_acc:
+        The accumulator value to verify against.  Defaults to the 'rsa_acc'
+        field of the currently trusted timestamp.
+
+    <Exceptions>
+      tuf.exceptions.RepositoryError:
+        If the snapshot RSA accumulator file is invalid or the verification
+        fails.
+
+    <Returns>
+      A dictionary containing the snapshot information about
+      'metadata_role', conforming to VERSIONINFO_SCHEMA or
+      METADATA_FILEINFO_SCHEMA.
+    """
+
+    # Modulus from https://en.wikipedia.org/wiki/RSA_numbers#RSA-2048
+    # We will want to generate a new one.
+    # This is duplicated from repository_lib and should live somewhere
+    # shared.
+    Modulus = "2519590847565789349402718324004839857142928212620403202777713783604366202070759555626401852588078" + \
+        "4406918290641249515082189298559149176184502808489120072844992687392807287776735971418347270261896375014971" + \
+        "8246911650776133798590957000973304597488084284017974291006424586918171951187461215151726546322822168699875" + \
+        "4918242243363725908514186546204357679842338718477444792073993423658482382428119816381501067481045166037730" + \
+        "6056201619676256133844143603833904414952634432190114657544454178424020924616515723350778707749817125772467" + \
+        "962926386356373289912154831438167899885040445364023527381951378636564391212010397122822120720357"
+    m = int(Modulus, 10)
+
+    if not rsa_acc:
+      # The accumulator is stored in timestamp as a decimal string.
+      rsa_acc = int(self.metadata['current']['timestamp']['rsa_acc'])
+
+    metadata_rolename = metadata_role + '-snapshot'
+
+    # Download the RSA accumulator proof.
+    upperbound_filelength = tuf.settings.MERKLE_FILELENGTH
+    self._update_rsa_acc_metadata(metadata_rolename, upperbound_filelength,
+        version)
+    metadata_directory = self.metadata_directory['current']
+    metadata_filename = metadata_rolename + '.json'
+    metadata_filepath = os.path.join(metadata_directory, metadata_filename)
+
+    # Ensure the metadata path is valid/exists, else ignore the call.
+    if not os.path.exists(metadata_filepath):
+      # No RSA accumulator proof found.
+      raise tuf.exceptions.RepositoryError('No snapshot RSA accumulator'
+          ' proof file for ' + metadata_role)
+
+    try:
+      snapshot_rsa_acc_proof = securesystemslib.util.load_json_file(
+          metadata_filepath)
+
+    # Although the metadata file may exist locally, it may not be a valid
+    # json file.  On the next refresh cycle, it will be updated as required.
+    # If it cannot be loaded from disk successfully, an exception should be
+    # raised by the caller.
+    except securesystemslib.exceptions.Error:
+      return
+
+    # Check the format.
+    tuf.formats.SNAPSHOT_RSA_ACC_SCHEMA.check_match(snapshot_rsa_acc_proof)
+
+    # Canonicalize the contents to determine the RSA accumulator prime.
+    contents = snapshot_rsa_acc_proof['leaf_contents']
+    json_contents = securesystemslib.formats.encode_canonical(contents)
+
+    prime = repository_lib.hash_to_prime(json_contents)
+
+    # The RSA accumulator proof is stored as a decimal string.
+    proof = int(snapshot_rsa_acc_proof['rsa_acc_proof'])
+    rsa_acc_proof_test = pow(proof, prime, m)
+
+    # Does the result match the RSA accumulator?
+    if rsa_acc_proof_test != rsa_acc:
+      raise tuf.exceptions.RepositoryError('RSA accumulator ' +
+          repr(rsa_acc) + ' does not match the proof ' + repr(proof) +
+          ' for ' + metadata_role)
+
+    # Return the verified snapshot contents.
+    return contents
+
+
+
+
+
   def _update_metadata_if_changed(self, metadata_role,
       referenced_metadata='snapshot'):
     """
@@ -1801,7 +2086,9 @@ def _update_metadata_if_changed(self, metadata_role,

     # Ensure the referenced metadata has been loaded.  The 'root' role may be
     # updated without having 'snapshot' available.
-    if referenced_metadata not in self.metadata['current']:
+    # When a snapshot RSA accumulator is used, there will not be a snapshot
+    # file.  Instead, if the snapshot RSA proof is missing, this will error
+    # below.
+    if 'rsa_acc' not in self.metadata['current']['timestamp'] and \
+        referenced_metadata not in self.metadata['current']:
       raise exceptions.RepositoryError('Cannot update'
           ' ' + repr(metadata_role) + ' because ' + referenced_metadata + ' is'
           ' missing.')
@@ -1813,12 +2100,18 @@ def _update_metadata_if_changed(self, metadata_role,
         repr(referenced_metadata)+ '. ' + repr(metadata_role) +
         ' may be updated.')

-    # Simply return if the metadata for 'metadata_role' has not been updated,
-    # according to the uncompressed metadata provided by the referenced
-    # metadata.  The metadata is considered updated if its version number is
-    # strictly greater than its currently trusted version number.
-    expected_versioninfo = self.metadata['current'][referenced_metadata] \
-        ['meta'][metadata_filename]
+    if 'rsa_acc' in self.metadata['current']['timestamp']:
+      # Download the version information from the RSA accumulator proof.
+      expected_versioninfo = self.verify_rsa_acc_proof(metadata_role)
+
+    else:
+      # Simply return if the metadata for 'metadata_role' has not been
+      # updated, according to the uncompressed metadata provided by the
+      # referenced metadata.  The metadata is considered updated if its
+      # version number is strictly greater than its currently trusted
+      # version number.
+      expected_versioninfo = self.metadata['current'][referenced_metadata] \
+          ['meta'][metadata_filename]

     if not self._versioninfo_has_been_updated(metadata_filename,
         expected_versioninfo):
@@ -2388,7 +2681,10 @@ def _refresh_targets_metadata(self, rolename='targets',

     roles_to_update = []

-    if rolename + '.json' in self.metadata['current']['snapshot']['meta']:
+    # Add the role if it is listed in snapshot.  If a snapshot RSA
+    # accumulator is used, the snapshot check will be done later, when the
+    # proof is verified.
+    if 'rsa_acc' in self.metadata['current']['timestamp'] or \
+        rolename + '.json' in self.metadata['current']['snapshot']['meta']:
       roles_to_update.append(rolename)

     if refresh_all_delegated_roles:
diff --git a/tuf/formats.py b/tuf/formats.py
index ca304ca9e4..54abd5dce7 100755
--- a/tuf/formats.py
+++ b/tuf/formats.py
@@ -358,6 +358,11 @@
   targets = FILEDICT_SCHEMA,
   delegations = SCHEMA.Optional(DELEGATIONS_SCHEMA))

+SNAPSHOT_RSA_ACC_SCHEMA = SCHEMA.Object(
+  leaf_contents = SCHEMA.OneOf([VERSIONINFO_SCHEMA,
+      METADATA_FILEINFO_SCHEMA]),
+  rsa_acc_proof = SCHEMA.AnyString())
+
 # Snapshot role: indicates the latest versions of all metadata (except
 # timestamp).
 SNAPSHOT_SCHEMA = SCHEMA.Object(
@@ -375,7 +380,8 @@
   spec_version = SPECIFICATION_VERSION_SCHEMA,
   version = METADATAVERSION_SCHEMA,
   expires = sslib_formats.ISO8601_DATETIME_SCHEMA,
-  meta = FILEINFODICT_SCHEMA)
+  meta = FILEINFODICT_SCHEMA,
+  rsa_acc = SCHEMA.Optional(HASH_SCHEMA))

 # project.cfg file: stores information about the project in a json dictionary
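For reference, a '<role>-snapshot.json' proof file conforming to SNAPSHOT_RSA_ACC_SCHEMA looks like the sketch below; the values are made up, and a real 'rsa_acc_proof' is a roughly 2048-bit number serialized as a decimal string. Note that 'leaf_contents' includes the 'name' key mixed in by acc_contents in repository_lib to keep digests role-specific:

```python
# Illustrative proof document (values invented; assumes tuf is importable).
proof_document = {
    "leaf_contents": {
        "version": 1,       # the VERSIONINFO_SCHEMA case
        "name": "role1"     # added by acc_contents to prevent replay
    },
    "rsa_acc_proof": "987654321"
}

import tuf.formats

# Raises securesystemslib.exceptions.FormatError on mismatch.
tuf.formats.SNAPSHOT_RSA_ACC_SCHEMA.check_match(proof_document)
```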
diff --git a/tuf/repository_lib.py b/tuf/repository_lib.py
index 642447d8b3..1591ac9528 100644
--- a/tuf/repository_lib.py
+++ b/tuf/repository_lib.py
@@ -30,6 +30,8 @@
 import shutil
 import json
 import tempfile
+from pyblake2 import blake2b
+import random

 import securesystemslib # pylint: disable=unused-import
 from securesystemslib import exceptions as sslib_exceptions
@@ -90,7 +92,7 @@ def _generate_and_write_metadata(rolename, metadata_filename,
     increment_version_number=True, repository_name='default',
     use_existing_fileinfo=False, use_timestamp_length=True,
     use_timestamp_hashes=True, use_snapshot_length=False,
-    use_snapshot_hashes=False):
+    use_snapshot_hashes=False, rsa_acc=False):
   """
   Non-public function that can generate and write the metadata for the
   specified 'rolename'.  It also increments the version number of 'rolename' if
@@ -117,11 +119,21 @@

   elif rolename == 'snapshot':
-    metadata = generate_snapshot_metadata(metadata_directory,
+    metadata, fileinfodict = generate_snapshot_metadata(metadata_directory,
         roleinfo['version'], roleinfo['expires'], storage_backend,
         consistent_snapshot, repository_name,
         use_length=use_snapshot_length, use_hashes=use_snapshot_hashes)

+    if rsa_acc:
+      root, leaves = _build_rsa_acc(fileinfodict)
+
+      # Add the RSA accumulator root to the timestamp roleinfo.
+      timestamp_roleinfo = roledb.get_roleinfo('timestamp', repository_name)
+      timestamp_roleinfo['rsa_acc'] = root
+
+      roledb.update_roleinfo('timestamp', timestamp_roleinfo,
+          repository_name=repository_name)
+
     _log_warning_if_expires_soon(SNAPSHOT_FILENAME, roleinfo['expires'],
         SNAPSHOT_EXPIRES_WARN_SECONDS)

@@ -133,7 +145,8 @@
     metadata = generate_timestamp_metadata(snapshot_file_path,
         roleinfo['version'], roleinfo['expires'], storage_backend,
         repository_name,
-        use_length=use_timestamp_length, use_hashes=use_timestamp_hashes)
+        use_length=use_timestamp_length, use_hashes=use_timestamp_hashes,
+        roleinfo=roleinfo)

     _log_warning_if_expires_soon(TIMESTAMP_FILENAME, roleinfo['expires'],
         TIMESTAMP_EXPIRES_WARN_SECONDS)
@@ -180,6 +193,9 @@
   else:
     logger.debug('Not incrementing ' + repr(rolename) + '\'s version number.')

+  if rolename == 'snapshot' and rsa_acc:
+    _write_rsa_proofs(root, leaves, storage_backend, metadata_directory,
+        metadata['version'])
+
   if rolename in roledb.TOP_LEVEL_ROLES and not allow_partially_signed:
     # Verify that the top-level 'rolename' is fully signed.  Only a delegated
     # role should not be written to disk without full verification of its
@@ -378,6 +394,8 @@ def _delete_obsolete_metadata(metadata_directory, snapshot_metadata,
   for metadata_role in metadata_files:
     if metadata_role.endswith('root.json'):
       continue
+    if metadata_role.endswith('-snapshot.json'):
+      continue

     metadata_path = os.path.join(metadata_directory, metadata_role)
@@ -1541,6 +1559,168 @@ def _get_hashes_and_length_if_needed(use_length, use_hashes, full_file_path,



+# I couldn't find a currently maintained python library for this, so
+# implementing it here.  It would be better to implement this in C, and
+# better still to use an existing library.
+# This is inspired by:
+# https://www.literateprograms.org/miller-rabin_primality_test__python_.html
+def miller_rabin_round(a, s, d, n):
+  # One round of the Miller-Rabin test with witness 'a', where
+  # n - 1 = 2^s * d and 'd' is odd.
+  a_to_power = pow(a, d, n)
+  if a_to_power == 1:
+    return True
+  for i in range(s):
+    if a_to_power == n - 1:
+      return True
+    a_to_power = (a_to_power * a_to_power) % n
+  return False
+
+
+
+
+
+def miller_rabin(n, rounds):
+  if n == 1:
+    return False
+  if n == 2 or n == 3:
+    return True
+
+  d = n - 1
+  s = 0
+  while d % 2 == 0:
+    d = d >> 1
+    s = s + 1
+
+  for i in range(rounds):
+    # Witnesses must be in [2, n - 2]; 0 would wrongly report 'composite'.
+    a = random.randrange(2, n - 1)
+    if not miller_rabin_round(a, s, d, n):
+      return False
+  return True
+
+
+
+
+# RSA accumulator code inspired by
+# https://github.com/ElrondNetwork/elrond-go/blob/v1.0.30/crypto/accumulator/rsa/rsaAcc.go
+def hash_to_prime(data):
+  # TODO: move constant definitions
+  basesMillerRabin = 12
+
+  h = blake2b(str(data).encode('utf-8'))
+  p = int(h.hexdigest(), 16)
+
+  # Use the Miller-Rabin primality test; if p is not prime, do more rounds
+  # of hashing until a (probable) prime is found.
+  while not miller_rabin(p, basesMillerRabin):
+    h = blake2b(str(p).encode('utf-8'))
+    p = int(h.hexdigest(), 16)
+
+  return p
+
+
+
+class acc_contents(object):
+  contents = None
+  name = None
+  proof = None
+
+  def __init__(self, name, contents):
+    # Include the name to ensure the digest differs between elements and
+    # cannot be replayed.
+    contents["name"] = name
+    self.contents = contents
+    self.name = name
+
+  def set_proof(self, proof):
+    self.proof = proof
+
+
+
+def _build_rsa_acc(fileinfodict):
+  """
+  Create an RSA accumulator from the snapshot fileinfo.  The per-leaf proof
+  files are written separately by _write_rsa_proofs().
+
+  Returns the root and the leaves.
+  """
+
+  # RSA accumulator constants
+  # TODO: move constant definitions
+  g = 3
+  # Modulus from https://en.wikipedia.org/wiki/RSA_numbers#RSA-2048
+  # We will want to generate a new one.
+  Modulus = "2519590847565789349402718324004839857142928212620403202777713783604366202070759555626401852588078" + \
+      "4406918290641249515082189298559149176184502808489120072844992687392807287776735971418347270261896375014971" + \
+      "8246911650776133798590957000973304597488084284017974291006424586918171951187461215151726546322822168699875" + \
+      "4918242243363725908514186546204357679842338718477444792073993423658482382428119816381501067481045166037730" + \
+      "6056201619676256133844143603833904414952634432190114657544454178424020924616515723350778707749817125772467" + \
+      "962926386356373289912154831438167899885040445364023527381951378636564391212010397122822120720357"
+  m = int(Modulus, 10)
+
+  # We will build the accumulator starting with the leaf nodes.  Each leaf
+  # contains snapshot information for a single metadata file.
+  leaves = []
+  primes = []
+  acc_exp = 1
+  for name, contents in sorted(fileinfodict.items()):
+    if name.endswith(".json"):
+      name = os.path.splitext(name)[0]
+    cont = acc_contents(name, contents)
+    leaves.append(cont)
+
+    json_contents = securesystemslib.formats.encode_canonical(contents)
+    prime = hash_to_prime(json_contents)
+    primes.append(prime)
+    acc_exp = acc_exp * prime
+
+  acc = pow(g, acc_exp, m)
+
+  proofs = []
+  for i in range(len(leaves)):
+    # Use integer division: these exponents are far too large for floats.
+    proof_exp = acc_exp // primes[i]
+    proof = pow(g, proof_exp, m)
+    proofs.append(proof)
+    leaves[i].set_proof(proof)
+
+  root = acc
+
+  # Return the root (the total accumulator) and the leaves.  The root must
+  # be used along with the proof.  The root should be securely sent to each
+  # client.  To do so, we will add it to the timestamp metadata.  The leaves
+  # will be used for verification.
+  return root, leaves
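hash_to_prime() above is deterministic: equal canonical contents always map to the same prime, and the 'name' field mixed in by acc_contents keeps two roles with identical version info from sharing a prime. A self-contained sketch of the same construction, using hashlib's blake2b (which exposes the same interface as pyblake2) and a compact Miller-Rabin; this is an illustration for review, not the module's code:

```python
import hashlib
import random

def miller_rabin(n, rounds=12):
    # Probabilistic primality test, equivalent in spirit to the functions
    # above: write n - 1 = 2^s * d with d odd, then test random witnesses.
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    d, s = n - 1, 0
    while d % 2 == 0:
        d, s = d // 2, s + 1
    for _ in range(rounds):
        a = random.randrange(2, n - 1)
        x = pow(a, d, n)
        if x in (1, n - 1):
            continue
        for _ in range(s - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False
    return True

def hash_to_prime(data):
    # Hash, then re-hash until the 512-bit digest value is (probably) prime.
    p = int(hashlib.blake2b(str(data).encode('utf-8')).hexdigest(), 16)
    while not miller_rabin(p):
        p = int(hashlib.blake2b(str(p).encode('utf-8')).hexdigest(), 16)
    return p

# Same contents -> same prime; a different role name changes the digest.
a = hash_to_prime('{"name":"role1","version":1}')
b = hash_to_prime('{"name":"role2","version":1}')
assert a == hash_to_prime('{"name":"role1","version":1}')
assert a != b
```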
+def _write_rsa_proofs(root, leaves, storage_backend, rsa_acc_directory,
+    version):
+  # The root and leaves must be part of the same fully constructed RSA
+  # accumulator.  The contents and proof will be downloaded by the client
+  # and used for verification.
+
+  # Before writing each leaf, make sure the storage_backend is instantiated.
+  if storage_backend is None:
+    storage_backend = securesystemslib.storage.FilesystemBackend()
+
+  for leaf in leaves:
+    # Write the leaf to the rsa_acc_directory.
+    file_contents = formats.build_dict_conforming_to_schema(
+        formats.SNAPSHOT_RSA_ACC_SCHEMA,
+        leaf_contents=leaf.contents,
+        rsa_acc_proof=str(leaf.proof))
+    file_content = _get_written_metadata(file_contents)
+    file_object = tempfile.TemporaryFile()
+    file_object.write(file_content)
+    filename = os.path.join(rsa_acc_directory, leaf.name + '-snapshot.json')
+
+    # Also write with consistent snapshots for auditing and client
+    # verification.
+    consistent_filename = os.path.join(rsa_acc_directory, str(version) +
+        '.' + leaf.name + '-snapshot.json')
+    securesystemslib.util.persist_temp_file(file_object, consistent_filename,
+        should_close=False)
+
+    storage_backend.put(file_object, filename)
+    file_object.close()
+
+
+
+
 def generate_snapshot_metadata(metadata_directory, version, expiration_date,
     storage_backend, consistent_snapshot=False,
     repository_name='default', use_length=False, use_hashes=False):
@@ -1683,19 +1863,22 @@ def generate_snapshot_metadata(metadata_directory, version, expiration_date,
   # generate_root_metadata, etc. with one function that generates
   # metadata, possibly rolling that upwards into the calling function.
   # There are very few things that really need to be done differently.
-  return formats.build_dict_conforming_to_schema(
+  metadata = formats.build_dict_conforming_to_schema(
       formats.SNAPSHOT_SCHEMA,
       version=version,
       expires=expiration_date,
       meta=fileinfodict)

+  return metadata, fileinfodict
+



 def generate_timestamp_metadata(snapshot_file_path, version, expiration_date,
-    storage_backend, repository_name, use_length=True, use_hashes=True):
+    storage_backend, repository_name, use_length=True, use_hashes=True,
+    roleinfo=None):
   """
   <Purpose>
     Generate the timestamp metadata object.  The 'snapshot.json' file must
@@ -1733,6 +1916,10 @@ def generate_timestamp_metadata(snapshot_file_path, version, expiration_date,
       metadata file in the timestamp metadata.  Default is True.

+    roleinfo:
+      The roleinfo for the timestamp role.  This carries the accumulator
+      value ('rsa_acc') when an RSA accumulator is in use.
+
   <Exceptions>
     securesystemslib.exceptions.FormatError, if the generated timestamp
     metadata object cannot be formatted correctly, or one of the arguments
     is improperly
@@ -1768,6 +1955,15 @@ def generate_timestamp_metadata(snapshot_file_path, version, expiration_date,
       formats.make_metadata_fileinfo(snapshot_version['version'],
           length, hashes)

+  if roleinfo and 'rsa_acc' in roleinfo:
+    rsa_acc = roleinfo['rsa_acc']
+    return formats.build_dict_conforming_to_schema(
+        formats.TIMESTAMP_SCHEMA,
+        version=version,
+        expires=expiration_date,
+        meta=snapshot_fileinfo,
+        rsa_acc=str(rsa_acc))
+
   # Generate the timestamp metadata object.
   # Use generalized build_dict_conforming_to_schema func to produce a dict
   # that contains all the appropriate information for timestamp metadata,
diff --git a/tuf/repository_tool.py b/tuf/repository_tool.py
index 0c1ac272d7..d667fe14ad 100755
--- a/tuf/repository_tool.py
+++ b/tuf/repository_tool.py
@@ -256,7 +256,7 @@ def __init__(self, repository_directory, metadata_directory,



-  def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False):
+  def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False,
+      rsa_acc=False):
     """
     <Purpose>
       Write all the JSON Metadata objects to their corresponding files for
@@ -286,6 +286,10 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False):
         written as-is (True) or whether hashes should be generated (False,
         requires access to the targets files on-disk).

+      rsa_acc:
+        Whether to generate snapshot RSA accumulator metadata in addition
+        to snapshot metadata.
+
     <Exceptions>
       tuf.exceptions.UnsignedMetadataError, if any of the top-level
       and delegated roles do not have the minimum threshold of signatures.
@@ -363,7 +367,8 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False):
             consistent_snapshot, filenames,
             repository_name=self._repository_name,
             use_snapshot_length=self._use_snapshot_length,
-            use_snapshot_hashes=self._use_snapshot_hashes)
+            use_snapshot_hashes=self._use_snapshot_hashes,
+            rsa_acc=rsa_acc)

       # Generate the 'timestamp.json' metadata file.
       if 'timestamp' in dirty_rolenames:
@@ -372,7 +377,8 @@ def writeall(self, consistent_snapshot=False, use_existing_fileinfo=False):
             self._storage_backend, consistent_snapshot, filenames,
             repository_name=self._repository_name,
             use_timestamp_length=self._use_timestamp_length,
-            use_timestamp_hashes=self._use_timestamp_hashes)
+            use_timestamp_hashes=self._use_timestamp_hashes,
+            rsa_acc=rsa_acc)

     roledb.unmark_dirty(dirty_rolenames, self._repository_name)
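For reviewers trying this out end to end, a minimal sketch of publishing with the accumulator enabled; it mirrors the pattern in tests/test_repository_tool.py, and assumes an existing repository with the top-level signing keys already loaded (the path is illustrative):

```python
from tuf import repository_tool as repo_tool

# Assumes a repository created beforehand with repo_tool and all top-level
# keys loaded, as tests/test_repository_tool.py does; path is made up.
repository = repo_tool.load_repository('path/to/repository')

# Mark the roles to re-sign, then write with the accumulator enabled.
repository.mark_dirty(['targets', 'snapshot', 'timestamp'])
repository.writeall(rsa_acc=True)

# Afterwards the staged metadata directory contains the usual role files
# plus one '<role>-snapshot.json' proof per snapshotted role (and its
# '<version>.<role>-snapshot.json' consistent copy), and timestamp.json
# carries the accumulator under signed['rsa_acc'].
```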