diff --git a/Cargo.lock b/Cargo.lock index 644090c9..8145fb1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.15" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430" +checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" dependencies = [ "alloy-rlp", "arbitrary", @@ -183,7 +183,6 @@ dependencies = [ "foldhash", "getrandom", "hashbrown 0.15.2", - "hex-literal", "indexmap", "itoa", "k256 0.13.4", @@ -218,7 +217,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -305,9 +304,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" @@ -493,7 +492,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -505,7 +504,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -557,13 +556,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -598,7 +597,7 @@ checksum = 
"3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -620,7 +619,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "itoa", "matchit", @@ -896,7 +895,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "arbitrary", @@ -1027,9 +1026,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.3" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "jobserver", "libc", @@ -1136,7 +1135,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1165,7 +1164,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "clap", @@ -1189,7 +1188,7 @@ dependencies = [ "fdlimit", "http_api", "http_metrics", - "hyper 1.5.1", + "hyper 1.5.2", "network", "parking_lot 0.12.3", "processor", @@ -1229,7 +1228,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "itertools 0.10.5", ] @@ 
-1246,7 +1245,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -1392,18 +1391,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1420,9 +1419,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -1537,7 +1536,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1585,7 +1584,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1607,7 +1606,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1636,6 +1635,20 @@ version = 
"0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -1662,6 +1675,23 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "database" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "dashmap", + "openssl", + "parking_lot 0.12.3", + "r2d2", + "r2d2_sqlite", + "rand", + "rusqlite", + "ssv_types", + "tempfile", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", +] + [[package]] name = "db-key" version = "0.0.5" @@ -1767,7 +1797,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1778,7 +1808,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -1799,7 +1829,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "unicode-xid", ] @@ -1837,7 +1867,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -1980,7 +2010,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", 
+ "syn 2.0.95", ] [[package]] @@ -2141,7 +2171,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2270,7 +2300,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2292,7 +2322,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2360,7 +2390,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", "discv5 0.9.0", @@ -2527,7 +2557,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2666,6 +2696,17 @@ dependencies = [ "bytes", ] +[[package]] +name = "fastrlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.3.0" @@ -2764,7 +2805,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2789,9 +2830,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -2930,7 +2971,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -2940,7 +2981,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", ] @@ -3068,14 +3109,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gossipsub" @@ -3110,7 +3151,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" 
dependencies = [ "async-channel", "asynchronous-codec", @@ -3291,12 +3332,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -3506,9 +3541,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -3530,9 +3565,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -3555,7 +3590,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -3568,7 +3603,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.31", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -3584,7 +3619,7 @@ dependencies = [ "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.5.2", "pin-project-lite", "tokio", "tower-service", @@ -3728,7 +3763,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + 
"syn 2.0.95", ] [[package]] @@ -3802,7 +3837,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "rand", "tokio", @@ -3863,7 +3898,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -3907,7 +3942,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", ] @@ -4103,7 +4138,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "arbitrary", "c-kzg", @@ -4153,9 +4188,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -4500,7 +4535,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", "thiserror 1.0.69", "tokio", @@ -4540,7 +4575,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -4572,7 +4607,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -4743,7 +4778,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4801,7 +4836,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "git-version", "target_info", @@ -4880,7 +4915,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4928,7 +4963,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "fnv", ] @@ -5004,7 +5039,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5046,7 +5081,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "prometheus", ] @@ -5098,9 +5133,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -5411,9 +5446,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -5495,7 +5530,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -5680,7 +5715,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -5755,35 +5790,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.6", + "thiserror 2.0.9", "ucd-trie", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -5888,7 +5923,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6023,7 +6058,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6048,13 +6083,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6147,9 +6182,9 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.6", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6165,10 +6200,10 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", 
"rustls-pki-types", "slab", - "thiserror 2.0.6", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -6176,9 +6211,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -6190,9 +6225,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -6313,9 +6348,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.6.0", ] @@ -6389,7 +6424,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls", "hyper-tls", "ipnet", @@ -6548,17 +6583,19 @@ dependencies = [ [[package]] name = "ruint" -version = "1.12.3" +version = "1.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c3cc4c2511671f327125da14133d0c5c5d137f006a1017a16f557bc85b16286" +checksum = "f5ef8fb1dd8de3870cb8400d51b4c2023854bbafd5431a3ac7e7317243e22d2f" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", "bytes", - "fastrlp", + "fastrlp 0.3.1", + "fastrlp 0.4.0", "num-bigint", + "num-integer", "num-traits", "parity-scale-codec 3.6.12", "primitive-types 0.12.2", @@ -6644,7 +6681,7 @@ version = "0.4.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.24", ] [[package]] @@ -6711,9 +6748,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.8", @@ -6743,9 +6780,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time", ] @@ -6773,9 +6810,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -6814,7 +6851,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" [[package]] name = "salsa20" @@ -6846,7 +6883,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -6944,9 +6981,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.13.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -6963,9 +7000,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "semver-parser" @@ -6988,7 +7025,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "serde", "url", @@ -6996,9 +7033,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -7015,20 +7052,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = 
"d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ "itoa", "memchr", @@ -7054,7 +7091,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -7460,6 +7497,7 @@ dependencies = [ "base64 0.22.1", "derive_more 1.0.0", "openssl", + "rusqlite", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] @@ -7609,7 +7647,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7629,9 +7667,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -7658,7 +7696,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -7738,7 +7776,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "async-channel", "futures", @@ -7752,12 +7790,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" 
+checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix 0.38.42", "windows-sys 0.59.0", @@ -7796,7 +7835,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -7813,11 +7852,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.9", ] [[package]] @@ -7828,18 +7867,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -7932,9 +7971,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -7980,7 +8019,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8066,7 +8105,7 @@ checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", - "winnow 0.6.20", + "winnow 0.6.22", ] [[package]] @@ -8143,7 +8182,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8224,7 +8263,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -8311,7 +8350,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8401,9 +8440,9 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" @@ -8489,7 +8528,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -8628,7 +8667,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "mime", "mime_guess", @@ -8693,7 +8732,7 @@ dependencies = [ "log", "proc-macro2", "quote", - 
"syn 2.0.90", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -8728,7 +8767,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9087,9 +9126,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -9235,7 +9274,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -9257,7 +9296,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -9277,7 +9316,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", "synstructure", ] @@ -9287,6 +9326,7 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ + "serde", "zeroize_derive", ] @@ -9298,7 +9338,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] @@ -9320,7 +9360,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.95", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f5379b4e..a099c6c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,8 @@ members = [ "anchor/common/qbft", "anchor/common/ssv_types", 
"anchor/common/version", + "anchor/common/version", + "anchor/database", "anchor/http_api", "anchor/http_metrics", "anchor/network", @@ -21,6 +23,7 @@ client = { path = "anchor/client" } qbft = { path = "anchor/common/qbft" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } +database = { path = "anchor/database" } network = { path = "anchor/network" } version = { path = "anchor/common/version" } processor = { path = "anchor/processor" } @@ -57,7 +60,9 @@ tokio = { version = "1.39.2", features = [ tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } base64 = "0.22.1" +rusqlite = "0.28.0" openssl = "0.10.68" +dashmap = "6.1.0" [profile.maxperf] inherits = "release" diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml index 5741ce1a..14be4cbe 100644 --- a/anchor/common/ssv_types/Cargo.toml +++ b/anchor/common/ssv_types/Cargo.toml @@ -8,4 +8,5 @@ authors = ["Sigma Prime "] base64 = { workspace = true } derive_more = { workspace = true } openssl = { workspace = true } +rusqlite = { workspace = true } types = { workspace = true } diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 308aee67..ed21c884 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -1,36 +1,40 @@ use crate::OperatorId; -use crate::Share; use derive_more::{Deref, From}; +use std::collections::HashSet; use types::{Address, Graffiti, PublicKey}; /// Unique identifier for a cluster #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] -pub struct ClusterId(pub u64); +pub struct ClusterId(pub [u8; 32]); -/// A Cluster is a group of Operators that are acting on behalf of a Validator +/// A Cluster is a group of Operators that are acting on behalf of one or more Validators +/// +/// Each cluster is owned by a unqiue EOA and only that Address may perform operators on the +/// Cluster. 
#[derive(Debug, Clone)] pub struct Cluster { /// Unique identifier for a Cluster pub cluster_id: ClusterId, - /// All of the members of this Cluster - pub cluster_members: Vec, + /// The owner of the cluster and all of the validators + pub owner: Address, + /// The Eth1 fee address for all validators in the cluster + pub fee_recipient: Address, /// The number of faulty operator in the Cluster pub faulty: u64, /// If the Cluster is liquidated or active pub liquidated: bool, - /// Metadata about the validator this committee represents - pub validator_metadata: ValidatorMetadata, + /// Operators in this cluster + pub cluster_members: HashSet, } -/// A member of a Cluster. This is just an Operator that holds onto a share of the Validator key +/// A member of a Cluster. +/// This is an Operator that holds a piece of the keyshare for each validator in the cluster #[derive(Debug, Clone)] pub struct ClusterMember { /// Unique identifier for the Operator this member represents pub operator_id: OperatorId, /// Unique identifier for the Cluster this member is a part of pub cluster_id: ClusterId, - /// The Share this member is responsible for - pub share: Share, } /// Index of the validator in the validator registry. 
@@ -40,14 +44,12 @@ pub struct ValidatorIndex(pub usize); /// General Metadata about a Validator #[derive(Debug, Clone)] pub struct ValidatorMetadata { - /// Index of the validator - pub validator_index: ValidatorIndex, /// Public key of the validator - pub validator_pubkey: PublicKey, - /// Eth1 fee address - pub fee_recipient: Address, + pub public_key: PublicKey, + /// The cluster that is responsible for this validator + pub cluster_id: ClusterId, + /// Index of the validator + pub index: ValidatorIndex, /// Graffiti pub graffiti: Graffiti, - /// The owner of the validator - pub owner: Address, } diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 6d25f44d..4cf950d1 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -4,4 +4,5 @@ pub use share::Share; mod cluster; mod operator; mod share; +mod sql_conversions; mod util; diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index 80672602..77fefdad 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -1,8 +1,15 @@ +use crate::{ClusterId, OperatorId}; use types::PublicKey; /// One of N shares of a split validator key. 
#[derive(Debug, Clone)] pub struct Share { + /// Public Key of the validator + pub validator_pubkey: PublicKey, + /// Operator this share belongs to + pub operator_id: OperatorId, + /// Cluster the operator who owns this share belongs to + pub cluster_id: ClusterId, /// The public key of this Share pub share_pubkey: PublicKey, /// The encrypted private key of the share diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs new file mode 100644 index 00000000..20ffe847 --- /dev/null +++ b/anchor/common/ssv_types/src/sql_conversions.rs @@ -0,0 +1,160 @@ +use crate::{Cluster, ClusterId, ClusterMember}; +use crate::{Operator, OperatorId}; +use crate::{Share, ValidatorIndex, ValidatorMetadata}; +use base64::prelude::*; +use openssl::rsa::Rsa; +use rusqlite::{types::Type, Error as SqlError, Row}; +use std::io::{Error, ErrorKind}; +use std::str::FromStr; +use types::{Address, Graffiti, PublicKey, GRAFFITI_BYTES_LEN}; + +// Helper for converting to Rustqlite Error +fn from_sql_error( + col: usize, + t: Type, + e: E, +) -> SqlError { + SqlError::FromSqlConversionFailure(col, t, Box::new(e)) +} + +// Conversion from SQL row to an Operator +impl TryFrom<&Row<'_>> for Operator { + type Error = rusqlite::Error; + fn try_from(row: &Row) -> Result { + // Get the OperatorId from column 0 + let id: OperatorId = OperatorId(row.get(0)?); + + // Get the public key from column 1 + let pem_string = row.get::<_, String>(1)?; + let decoded_pem = BASE64_STANDARD + .decode(pem_string) + .map_err(|e| from_sql_error(1, Type::Text, e))?; + let rsa_pubkey = + Rsa::public_key_from_pem(&decoded_pem).map_err(|e| from_sql_error(1, Type::Text, e))?; + + // Get the owner from column 2 + let owner_str = row.get::<_, String>(2)?; + let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(2, Type::Text, e))?; + + Ok(Operator { + id, + rsa_pubkey, + owner, + }) + } +} + +// Conversion from SQL row and cluster members into a Cluster +impl 
TryFrom<(&Row<'_>, Vec)> for Cluster { + type Error = rusqlite::Error; + + fn try_from( + (row, cluster_members): (&Row<'_>, Vec), + ) -> Result { + // Get ClusterId from column 0 + let cluster_id = ClusterId(row.get(0)?); + + // Get the owner from column 1 + let owner_str = row.get::<_, String>(1)?; + let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(1, Type::Text, e))?; + + // Get the fee_recipient from column 2 + let fee_recipient_str = row.get::<_, String>(2)?; + let fee_recipient = + Address::from_str(&fee_recipient_str).map_err(|e| from_sql_error(2, Type::Text, e))?; + + // Get faulty count from column 3 + let faulty: u64 = row.get(3)?; + + // Get liquidated status from column 4 + let liquidated: bool = row.get(4)?; + + Ok(Cluster { + cluster_id, + owner, + fee_recipient, + faulty, + liquidated, + cluster_members: cluster_members + .into_iter() + .map(|member| member.operator_id) + .collect(), + }) + } +} + +// Conversion from SQL row to a ClusterMember +impl TryFrom<&Row<'_>> for ClusterMember { + type Error = rusqlite::Error; + + fn try_from(row: &Row) -> Result { + // Get ClusterId from column 0 + let cluster_id = ClusterId(row.get(0)?); + + // Get OperatorId from column 1 + let operator_id = OperatorId(row.get(1)?); + + Ok(ClusterMember { + operator_id, + cluster_id, + }) + } +} + +// Conversion from SQL row to ValidatorMetadata +impl TryFrom<&Row<'_>> for ValidatorMetadata { + type Error = SqlError; + fn try_from(row: &Row) -> Result { + // Get public key from column 0 + let validator_pubkey_str = row.get::<_, String>(0)?; + let public_key = PublicKey::from_str(&validator_pubkey_str) + .map_err(|e| from_sql_error(1, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; + + // Get ClusterId from column 1 + let cluster_id: ClusterId = ClusterId(row.get(1)?); + + // Get ValidatorIndex from column 2 + let index: ValidatorIndex = ValidatorIndex(row.get(2)?); + + // Get Graffiti from column 3 + let graffiti = Graffiti(row.get::<_, [u8; 
GRAFFITI_BYTES_LEN]>(3)?); + + Ok(ValidatorMetadata { + public_key, + cluster_id, + index, + graffiti, + }) + } +} + +// Conversion from SQL row into a Share +impl TryFrom<&Row<'_>> for Share { + type Error = rusqlite::Error; + fn try_from(row: &Row) -> Result { + // Get Share PublicKey from column 0 + let share_pubkey_str = row.get::<_, String>(0)?; + let share_pubkey = PublicKey::from_str(&share_pubkey_str) + .map_err(|e| from_sql_error(0, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; + + // Get the encrypted private key from column 1 + let encrypted_private_key: [u8; 256] = row.get(1)?; + + // Get the OperatorId from column 2 and ClusterId from column 3 + let operator_id = OperatorId(row.get(2)?); + let cluster_id = ClusterId(row.get(3)?); + + // Get the Validator PublicKey from column 4 + let validator_pubkey_str = row.get::<_, String>(4)?; + let validator_pubkey = PublicKey::from_str(&validator_pubkey_str) + .map_err(|e| from_sql_error(4, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; + + Ok(Share { + validator_pubkey, + operator_id, + cluster_id, + share_pubkey, + encrypted_private_key, + }) + } +} diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml new file mode 100644 index 00000000..2f1564d2 --- /dev/null +++ b/anchor/database/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "database" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +base64 = { workspace = true } +dashmap = { workspace = true } +openssl = { workspace = true } +parking_lot = { workspace = true } +r2d2 = "0.8.10" +r2d2_sqlite = "0.21.0" +rusqlite = { workspace = true } +ssv_types = { workspace = true } +types = { workspace = true } + +[dev-dependencies] +rand = "0.8.5" +tempfile = "3.14.0" diff --git a/anchor/database/README.md b/anchor/database/README.md new file mode 100644 index 00000000..6f7908c8 --- /dev/null +++ b/anchor/database/README.md @@ -0,0 +1,64 @@ +# Anchor Database + +The Anchor Database provides 
a robust persistent and in-memory caching layer for the Anchor project, specifically designed to handle SSV Network data efficiently. This crate manages both persistent storage of blockchain event data and high-performance in-memory access patterns. + +## Table of Contents + +1. [Overview](#overview) +2. [Core Features](#core) +3. [Architecture](#Architecture) +4. [Data Models](#Data) + +## Overview + +The Anchor Database serves as the backbone for storing and accessing SSV Network event data. When an Anchor node starts up, it needs to process and store blockchain event logs to maintain state. + +## Core Features +* **Persistent Storage**: SQLite-based store with automatic schema management +* **In-Memory Caching**: Efficient caching of frequently accessed data +* **Multi-Index Access**: Flexible data access patterns through multiple different keys +* **Automatic State Recovery**: Rebuilds in-memory state from persistent storage on startup. +* **Thread Safety**: Concurrent access support through DashMap implementations + + +## Architecture +The database architecture consists of two key layers + +### Storage Layer + +At the foundation lies a SQLite database that provides persistent storage. This layer encompasses +* **Database Connection Management**: A connection pool that maintains and reuses SQLite connections efficiently, preventing resource exhaustion while ensuring consistent access +* **Schema and Transaction Management**: Automatic table creation and transaction support for data integrity + + +### Cache Layer +The in-memory cache layer combines high-performance caching with sophisticated indexing through a unified system. It is broken up into Single-State and Multi-State. + +* **Single State**: Single state handles straightforward, one-to-one relationships where data only needs one access pattern. This is ideal for data that is frequently accessed but has simple relationships.
+* **Multi State**: Multi State handles complex relationships where the same data needs to be accessed through different keys. This is implemented through a series of MultiIndexMaps, each supporting three different access patterns for the same data. The type system enforces correct usage through the UniqueTag and NonUniqueTag markers, preventing incorrect access patterns at compile time. Each MultiIndexMap in the Multi State provides three ways to access its data: + 1) A primary key that uniquely identifies each piece of data + 2) A secondary key that can either uniquely identify data or map to multiple items + 3) A tertiary key that can also be unique or map to multiple items + +## Data Models +The database handles several core data types + +**Operator** +* Represents a network operator +* Identified by OperatorId +* Contains RSA public key and owner address + +**Cluster** +* Represents a group of Operators managing validators +* Contains cluster membership information +* Tracks operational status and fault counts + +**Validator** +* Contains validator metadata +* Links to cluster membership +* Stores configuration data + +**Share** +* Represents cryptographic shares for validators +* Links operators to validators +* Contains encrypted key data diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs new file mode 100644 index 00000000..30919a7c --- /dev/null +++ b/anchor/database/src/cluster_operations.rs @@ -0,0 +1,157 @@ +use super::{DatabaseError, NetworkDatabase, NonUniqueIndex, SqlStatement, UniqueIndex, SQL}; +use rusqlite::params; +use ssv_types::{Cluster, ClusterId, Share, ValidatorMetadata}; +use std::sync::atomic::Ordering; +use types::{Address, PublicKey}; + +/// Implements all cluster related functionality on the database +impl NetworkDatabase { + /// Inserts a new validator into the database.
A new cluster will be created if this is the + /// first validator for the cluster + pub fn insert_validator( + &self, + cluster: Cluster, + validator: ValidatorMetadata, + shares: Vec, + ) -> Result<(), DatabaseError> { + let mut conn = self.connection()?; + let tx = conn.transaction()?; + + // Insert the top level cluster data if it does not exist, and the associated validator metadata + tx.prepare_cached(SQL[&SqlStatement::InsertCluster])? + .execute(params![ + *cluster.cluster_id, // cluster id + cluster.owner.to_string(), // owner + cluster.fee_recipient.to_string(), // fee recipient + ])?; + tx.prepare_cached(SQL[&SqlStatement::InsertValidator])? + .execute(params![ + validator.public_key.to_string(), // validator public key + *cluster.cluster_id, // cluster id + *validator.index, // validator index + validator.graffiti.0.as_slice(), // graffiti + ])?; + + // Record shares if one belongs to the current operator + let mut our_share = None; + let own_id = self.state.single_state.id.load(Ordering::Relaxed); + + shares.iter().try_for_each(|share| { + // Check if any of these shares belong to us, meaning we are a member in the cluster + if own_id == *share.operator_id { + our_share = Some(share); + } + + // Insert the cluster member and the share + tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? 
+ .execute(params![*share.cluster_id, *share.operator_id])?; + self.insert_share(&tx, share, &validator.public_key) + })?; + + // Commit all operations to the db + tx.commit()?; + + // If we are a member in this cluster, store membership and our share + if let Some(share) = our_share { + // Record that we are a member of this cluster + self.state.single_state.clusters.insert(cluster.cluster_id); + + // Save the keyshare + self.shares().insert( + &validator.public_key, // The validator this keyshare belongs to + &cluster.cluster_id, // The id of the cluster + &cluster.owner, // The owner of the cluster + share.to_owned(), // The keyshare itself + ); + } + + // Save all cluster related information + self.clusters().insert( + &cluster.cluster_id, // The id of the cluster + &validator.public_key, // The public key of validator added to the cluster + &cluster.owner, // Owner of the cluster + cluster.to_owned(), // The Cluster and all containing information + ); + + // Save the metadata for the validators + self.metadata().insert( + &validator.public_key, // The public key of the validator + &cluster.cluster_id, // The id of the cluster the validator belongs to + &cluster.owner, // The owner of the cluster + validator.to_owned(), // The metadata of the validator + ); + + Ok(()) + } + + /// Mark the cluster as liquidated or active + pub fn update_status(&self, cluster_id: ClusterId, status: bool) -> Result<(), DatabaseError> { + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])? + .execute(params![ + status, // status of the cluster (liquidated = false, active = true) + *cluster_id // Id of the cluster + ])?; + + // Update in memory status of cluster + if let Some(mut cluster) = self.clusters().get_by(&cluster_id) { + cluster.liquidated = status; + self.clusters().update(&cluster_id, cluster); + } + + Ok(()) + } + + /// Delete a validator from a cluster. 
This will cascade and remove all corresponding share + /// data for this validator. If this validator is the last one in the cluster, the cluster + /// and all corresponding cluster members will also be removed + pub fn delete_validator(&self, validator_pubkey: &PublicKey) -> Result<(), DatabaseError> { + // Remove from database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::DeleteValidator])? + .execute(params![validator_pubkey.to_string()])?; + + // Remove from in memory + self.shares().remove(validator_pubkey); + let metadata = self + .metadata() + .remove(validator_pubkey) + .expect("Data should have existed"); + + // If there is no longer and validators for this cluster, remove it from both the cluster + // multi index map and the cluster membership set + if self.metadata().get_all_by(&metadata.cluster_id).is_none() { + self.clusters().remove(&metadata.cluster_id); + self.state + .single_state + .clusters + .remove(&metadata.cluster_id); + } + + Ok(()) + } + + /// Bump the nonce of the owner + pub fn bump_nonce(&self, owner: &Address) -> Result<(), DatabaseError> { + // bump the nonce in the db + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::BumpNonce])? 
+ .execute(params![owner.to_string()])?; + + // bump the nonce in memory + if !self.state.single_state.nonces.contains_key(owner) { + // if it does not yet exist in memory, then create an entry and set it to one + self.state.single_state.nonces.insert(*owner, 1); + } else { + // otherwise, just increment the entry + let mut entry = self + .state + .single_state + .nonces + .get_mut(owner) + .expect("This must exist"); + *entry += 1; + } + Ok(()) + } +} diff --git a/anchor/database/src/error.rs b/anchor/database/src/error.rs new file mode 100644 index 00000000..cbf8468a --- /dev/null +++ b/anchor/database/src/error.rs @@ -0,0 +1,37 @@ +use rusqlite::Error as SQLError; +use std::fmt::Display; +use std::io::{Error as IOError, ErrorKind}; + +#[derive(Debug)] +pub enum DatabaseError { + NotFound(String), + AlreadyPresent(String), + IOError(ErrorKind), + SQLError(String), + SQLPoolError(String), +} + +impl From for DatabaseError { + fn from(error: IOError) -> DatabaseError { + DatabaseError::IOError(error.kind()) + } +} + +impl From for DatabaseError { + fn from(error: SQLError) -> DatabaseError { + DatabaseError::SQLError(error.to_string()) + } +} + +impl From for DatabaseError { + fn from(error: r2d2::Error) -> Self { + // Use `Display` impl to print "timed out waiting for connection" + DatabaseError::SQLPoolError(format!("{}", error)) + } +} + +impl Display for DatabaseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs new file mode 100644 index 00000000..859db7d4 --- /dev/null +++ b/anchor/database/src/lib.rs @@ -0,0 +1,163 @@ +use dashmap::{DashMap, DashSet}; +use openssl::{pkey::Public, rsa::Rsa}; +use r2d2_sqlite::SqliteConnectionManager; +use rusqlite::params; +use ssv_types::{Cluster, ClusterId, Operator, OperatorId, Share, ValidatorMetadata}; +use std::fs::File; +use std::path::Path; +use std::sync::atomic::AtomicU64; +use 
std::sync::atomic::Ordering; +use std::time::Duration; +use types::{Address, PublicKey}; + +pub use crate::error::DatabaseError; +pub use crate::multi_index::{MultiIndexMap, *}; +use crate::sql_operations::{SqlStatement, SQL}; + +mod cluster_operations; +mod error; +mod multi_index; +mod operator_operations; +mod share_operations; +mod sql_operations; +mod state; +mod validator_operations; + +#[cfg(test)] +mod tests; + +const POOL_SIZE: u32 = 1; +const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); + +type Pool = r2d2::Pool; +type PoolConn = r2d2::PooledConnection; + +/// All of the shares that belong to the current operator +/// Primary: public key of validator. uniquely identifies share +/// Secondary: cluster id. corresponds to a list of shares +/// Tertiary: owner of the cluster. corresponds to a list of shares +pub(crate) type ShareMultiIndexMap = + MultiIndexMap; +/// Metadata for all validators in the network +/// Primary: public key of the validator. uniquely identifies the metadata +/// Secondary: cluster id. corresponds to list of metadata for all validators +/// Tertiary: owner of the cluster: corresponds to list of metadata for all validators +pub(crate) type MetadataMultiIndexMap = + MultiIndexMap; +/// All of the clusters in the network +/// Primary: cluster id. uniquely identifies a cluster +/// Secondary: public key of the validator. uniquely identifies a cluster +/// Tertiary: owner of the cluster. uniquely identifies a cluster +pub(crate) type ClusterMultiIndexMap = + MultiIndexMap; + +// Information that needs to be accessed via multiple different indicies +#[derive(Debug)] +struct MultiState { + shares: ShareMultiIndexMap, + validator_metadata: MetadataMultiIndexMap, + clusters: ClusterMultiIndexMap, +} + +// General information that can be single index access +#[derive(Debug, Default)] +struct SingleState { + /// The ID of our own operator. This is determined via events when the operator is + /// registered with the network. 
Therefore, this may not be available right away if the operator + /// is running but has not been registered with the network contract yet. + id: AtomicU64, + /// The last block that was processed + last_processed_block: AtomicU64, + /// All of the operators in the network + operators: DashMap, + /// All of the Clusters that we are a memeber of + clusters: DashSet, + /// Nonce of the owner account + nonces: DashMap, +} + +// Container to hold all network state +#[derive(Debug)] +struct NetworkState { + multi_state: MultiState, + single_state: SingleState, +} + +/// Top level NetworkDatabase that contains in memory storage for quick access +/// to relevant information and a connection to the database +#[derive(Debug)] +pub struct NetworkDatabase { + /// The public key of our operator + pubkey: Rsa, + /// Custom state stores for easy data access + state: NetworkState, + /// Connection to the database + conn_pool: Pool, +} + +impl NetworkDatabase { + /// Construct a new NetworkDatabase at the given path and the Public Key of the current operator + pub fn new(path: &Path, pubkey: &Rsa) -> Result { + let conn_pool = Self::open_or_create(path)?; + let state = NetworkState::new_with_state(&conn_pool, pubkey)?; + Ok(Self { + pubkey: pubkey.clone(), + state, + conn_pool, + }) + } + + /// Update the last processed block number in the database + pub fn processed_block(&self, block_number: u64) -> Result<(), DatabaseError> { + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::UpdateBlockNumber])? + .execute(params![block_number])?; + self.state + .single_state + .last_processed_block + .store(block_number, Ordering::Relaxed); + Ok(()) + } + + // Open an existing database at the given `path`, or create one if none exists. 
+ fn open_or_create(path: &Path) -> Result { + if path.exists() { + Self::open_conn_pool(path) + } else { + Self::create(path) + } + } + + // Build a new connection pool + fn open_conn_pool(path: &Path) -> Result { + let manager = SqliteConnectionManager::file(path); + // some other args here + let conn_pool = Pool::builder() + .max_size(POOL_SIZE) + .connection_timeout(CONNECTION_TIMEOUT) + .build(manager)?; + Ok(conn_pool) + } + + // Create a database at the given path. + fn create(path: &Path) -> Result { + let _file = File::options() + .write(true) + .read(true) + .create_new(true) + .open(path)?; + + // restrict file permissions + let conn_pool = Self::open_conn_pool(path)?; + let conn = conn_pool.get()?; + + // create all of the tables + conn.execute_batch(include_str!("table_schema.sql"))?; + Ok(conn_pool) + } + + // Open a new connection + fn connection(&self) -> Result { + Ok(self.conn_pool.get()?) + } +} diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs new file mode 100644 index 00000000..92cf3ec5 --- /dev/null +++ b/anchor/database/src/multi_index.rs @@ -0,0 +1,426 @@ +use dashmap::DashMap; +use std::hash::Hash; +use std::marker::PhantomData; + +/// Marker trait for uniquely identifying indicies +pub trait Unique {} + +/// Marker trait for non-uniquely identifiying indicies +pub trait NotUnique {} + +/// Index type markers +pub enum Primary {} +pub enum Secondary {} +pub enum Tertiary {} + +// Type tags markers +#[derive(Debug)] +pub enum UniqueTag {} +impl Unique for UniqueTag {} + +#[derive(Debug)] +pub enum NonUniqueTag {} +impl NotUnique for NonUniqueTag {} + +/// Trait for accessing values through a unique index +pub trait UniqueIndex { + fn get_by(&self, key: &K) -> Option; +} + +/// Trait for accessing values through a non-unique index +pub trait NonUniqueIndex { + fn get_all_by(&self, key: &K) -> Option>; +} + +#[derive(Debug)] +struct InnerMaps +where + K1: Eq + Hash, + K2: Eq + Hash, + K3: Eq + Hash, +{ + 
primary: DashMap, + secondary_unique: DashMap, + secondary_multi: DashMap>, + tertiary_unique: DashMap, + tertiary_multi: DashMap>, +} + +/// A concurrent multi-index map that supports up to three different access patterns. +/// The core differentiates between unique identification and non unique identification. The primary +/// index is forced to always uniquely identify the value. The secondary and tertiary indicies have +/// more flexibility. The key may non uniquely identify many different values, or uniquely identify +/// a single value +/// +/// Example: A share is uniquely identified by the Validators public key that it belongs too. A +/// ClusterId does not uniquely identify a share as a cluster contains multiple shares +/// +/// - K1: Primary key type (always unique) +/// - K2: Secondary key type +/// - K3: Tertiary key type +/// - V: Value type +/// - U1: Secondary index uniqueness (Unique or NotUnique) +/// - U2: Tertiary index uniqueness (Unique or NotUnique) +#[derive(Debug)] +pub struct MultiIndexMap +where + K1: Eq + Hash, + K2: Eq + Hash, + K3: Eq + Hash, +{ + maps: InnerMaps, + _marker: PhantomData<(U1, U2)>, +} + +impl Default for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U1: 'static, + U2: 'static, +{ + fn default() -> Self { + Self { + maps: InnerMaps { + primary: DashMap::new(), + secondary_unique: DashMap::new(), + secondary_multi: DashMap::new(), + tertiary_unique: DashMap::new(), + tertiary_multi: DashMap::new(), + }, + _marker: PhantomData, + } + } +} + +impl MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U1: 'static, + U2: 'static, +{ + /// Creates a new empty MultiIndexMap + pub fn new() -> Self { + Self { + maps: InnerMaps { + primary: DashMap::new(), + secondary_unique: DashMap::new(), + secondary_multi: DashMap::new(), + tertiary_unique: DashMap::new(), + tertiary_multi: DashMap::new(), + }, + _marker: 
PhantomData, + } + } + + /// Number of entires in the primary map + pub fn length(&self) -> usize { + self.maps.primary.len() + } + + /// Insert a new value and associated keys into the map + pub fn insert(&self, k1: &K1, k2: &K2, k3: &K3, v: V) { + // Insert into primary map first + self.maps.primary.insert(k1.clone(), v); + + // Handle secondary index based on uniqueness + if std::any::TypeId::of::() == std::any::TypeId::of::() { + self.maps.secondary_unique.insert(k2.clone(), k1.clone()); + } else { + self.maps + .secondary_multi + .entry(k2.clone()) + .and_modify(|v| v.push(k1.clone())) + .or_insert_with(|| vec![k1.clone()]); + } + + // Handle tertiary index based on uniqueness + if std::any::TypeId::of::() == std::any::TypeId::of::() { + self.maps.tertiary_unique.insert(k3.clone(), k1.clone()); + } else { + self.maps + .tertiary_multi + .entry(k3.clone()) + .and_modify(|v| v.push(k1.clone())) + .or_insert_with(|| vec![k1.clone()]); + } + } + + /// Remove a value and all its indexes using the primary key + pub fn remove(&self, k1: &K1) -> Option { + // Remove from primary storage + let removed = self.maps.primary.remove(k1)?; + + // Remove from secondary index + if std::any::TypeId::of::() == std::any::TypeId::of::() { + // For unique indexes, just remove the entry that points to this k1 + self.maps.secondary_unique.retain(|_, v| v != k1); + } else { + // For non-unique indexes, remove k1 from any vectors it appears in + self.maps.secondary_multi.retain(|_, v| { + v.retain(|x| x != k1); + !v.is_empty() + }); + } + + // Remove from tertiary index + if std::any::TypeId::of::() == std::any::TypeId::of::() { + // For unique indexes, just remove the entry that points to this k1 + self.maps.tertiary_unique.retain(|_, v| v != k1); + } else { + // For non-unique indexes, remove k1 from any vectors it appears in + self.maps.tertiary_multi.retain(|_, v| { + v.retain(|x| x != k1); + !v.is_empty() + }); + } + + Some(removed.1) + } + + /// Update an existing value using the 
primary key + /// Only updates if the primary key exists, indexes remain unchanged + pub fn update(&self, k1: &K1, new_value: V) -> Option { + if !self.maps.primary.contains_key(k1) { + return None; + } + + // Only update the value in primary storage + self.maps.primary.insert(k1.clone(), new_value) + } +} + +// Implement unique access for primary key +impl UniqueIndex for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, +{ + fn get_by(&self, key: &K1) -> Option { + self.maps.primary.get(key).map(|v| v.value().clone()) + } +} + +// Implement unique access for secondary key +impl UniqueIndex for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U1: Unique, +{ + fn get_by(&self, key: &K2) -> Option { + let primary_key = self.maps.secondary_unique.get(key)?; + self.maps + .primary + .get(primary_key.value()) + .map(|v| v.value().clone()) + } +} + +// Implement non-unique access for secondary key +impl NonUniqueIndex + for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U1: NotUnique, +{ + fn get_all_by(&self, key: &K2) -> Option> { + self.maps.secondary_multi.get(key).map(|keys| { + keys.value() + .iter() + .filter_map(|k1| self.maps.primary.get(k1).map(|v| v.value().clone())) + .collect() + }) + } +} + +// Implement unique access for tertiary key +impl UniqueIndex for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U2: Unique, +{ + fn get_by(&self, key: &K3) -> Option { + let primary_key = self.maps.tertiary_unique.get(key)?; + self.maps + .primary + .get(primary_key.value()) + .map(|v| v.value().clone()) + } +} + +// Implement non-unique access for tertiary key +impl NonUniqueIndex for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U2: NotUnique, +{ + fn 
get_all_by(&self, key: &K3) -> Option> { + self.maps.tertiary_multi.get(key).map(|keys| { + keys.value() + .iter() + .filter_map(|k1| self.maps.primary.get(k1).map(|v| v.value().clone())) + .collect() + }) + } +} + +#[cfg(test)] +mod multi_index_tests { + use super::*; + + #[derive(Clone, Debug, PartialEq)] + struct TestValue { + id: i32, + data: String, + } + + #[test] + fn test_basic_operations() { + let map: MultiIndexMap = + MultiIndexMap::new(); + + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + + // Test insertion + map.insert(&1, &"key1".to_string(), &true, value.clone()); + + // Test primary key access + assert_eq!(map.get_by(&1), Some(value.clone())); + + // Test secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(value.clone())); + + // Test tertiary key access + assert_eq!(map.get_by(&true), Some(value.clone())); + + // Test update + let new_value = TestValue { + id: 1, + data: "updated".to_string(), + }; + map.update(&1, new_value.clone()); + assert_eq!(map.get_by(&1), Some(new_value.clone())); + + // Test removal + assert_eq!(map.remove(&1), Some(new_value.clone())); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key1".to_string()), None); + assert_eq!(map.get_by(&true), None); + } + + #[test] + fn test_non_unique_indices() { + let map: MultiIndexMap = + MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert multiple values with same secondary and tertiary keys + map.insert(&1, &"shared_key".to_string(), &true, value1.clone()); + map.insert(&2, &"shared_key".to_string(), &true, value2.clone()); + + // Test primary key access (still unique) + assert_eq!(map.get_by(&1), Some(value1.clone())); + assert_eq!(map.get_by(&2), Some(value2.clone())); + + // Test secondary key access (non-unique) + let secondary_values = map.get_all_by(&"shared_key".to_string()).unwrap(); + 
assert_eq!(secondary_values.len(), 2); + assert!(secondary_values.contains(&value1)); + assert!(secondary_values.contains(&value2)); + + // Test tertiary key access (non-unique) + let tertiary_values = map.get_all_by(&true).unwrap(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&value1)); + assert!(tertiary_values.contains(&value2)); + + // Test removal maintains other entries + map.remove(&1); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&2), Some(value2.clone())); + + let remaining_secondary = map.get_all_by(&"shared_key".to_string()).unwrap(); + assert_eq!(remaining_secondary.len(), 1); + assert_eq!(remaining_secondary[0], value2); + } + + #[test] + fn test_mixed_uniqueness() { + let map: MultiIndexMap = + MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert values with unique secondary key but shared tertiary key + map.insert(&1, &"key1".to_string(), &true, value1.clone()); + map.insert(&2, &"key2".to_string(), &true, value2.clone()); + + // Test unique secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(value1.clone())); + assert_eq!(map.get_by(&"key2".to_string()), Some(value2.clone())); + + // Test non-unique tertiary key access + let tertiary_values = map.get_all_by(&true).unwrap(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&value1)); + assert!(tertiary_values.contains(&value2)); + } + + #[test] + fn test_empty_cases() { + let map: MultiIndexMap = + MultiIndexMap::new(); + + // Test access on empty map + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key".to_string()), None); + assert_eq!(map.get_by(&true), None); + + // Test remove on empty map + assert_eq!(map.remove(&1), None); + + // Test update on empty map + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + assert_eq!(map.update(&1, value), None); + } 
+} diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs new file mode 100644 index 00000000..74e47426 --- /dev/null +++ b/anchor/database/src/operator_operations.rs @@ -0,0 +1,75 @@ +use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; +use base64::prelude::*; +use rusqlite::params; +use ssv_types::{Operator, OperatorId}; +use std::sync::atomic::Ordering; + +/// Implements all operator related functionality on the database +impl NetworkDatabase { + /// Insert a new Operator into the database + pub fn insert_operator(&self, operator: &Operator) -> Result<(), DatabaseError> { + // Make sure that this operator does not already exist + if self.operator_exists(&operator.id) { + return Err(DatabaseError::NotFound(format!( + "Operator with id {} already in database", + *operator.id + ))); + } + + // Base64 encode the key for storage + let pem_key = operator + .rsa_pubkey + .public_key_to_pem() + .expect("Failed to encode RsaPublicKey"); + let encoded = BASE64_STANDARD.encode(pem_key.clone()); + + // Insert into the database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::InsertOperator])?
+ .execute(params![ + *operator.id, // The id of the registered operator + encoded, // RSA public key + operator.owner.to_string() // The owner address of the operator + ])?; + + // Check to see if this operator is the current operator + let own_id = self.state.single_state.id.load(Ordering::Relaxed); + if own_id == u64::MAX { + // If the keys match, this is the current operator so we want to save the id + let keys_match = pem_key == self.pubkey.public_key_to_pem().unwrap_or_default(); + if keys_match { + self.state + .single_state + .id + .store(*operator.id, Ordering::Relaxed); + } + } + // Store the operator in memory + self.state + .single_state + .operators + .insert(operator.id, operator.to_owned()); + Ok(()) + } + + /// Delete an operator + pub fn delete_operator(&self, id: OperatorId) -> Result<(), DatabaseError> { + // Make sure that this operator exists + if !self.operator_exists(&id) { + return Err(DatabaseError::NotFound(format!( + "Operator with id {} not in database", + *id + ))); + } + + // Remove from db and in memory. This should cascade to delete this operator from all of the + // clusters that it is in and all of the shares that it owns + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::DeleteOperator])? 
+ .execute(params![*id])?; + + // Remove the operator + self.state.single_state.operators.remove(&id); + Ok(()) + } +} diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs new file mode 100644 index 00000000..d7a48ddf --- /dev/null +++ b/anchor/database/src/share_operations.rs @@ -0,0 +1,24 @@ +use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; +use rusqlite::{params, Transaction}; +use ssv_types::Share; +use types::PublicKey; + +/// Implements all Share related functionality on the database +impl NetworkDatabase { + pub(crate) fn insert_share( + &self, + tx: &Transaction<'_>, + share: &Share, + validator_pubkey: &PublicKey, + ) -> Result<(), DatabaseError> { + tx.prepare_cached(SQL[&SqlStatement::InsertShare])? + .execute(params![ + validator_pubkey.to_string(), + *share.cluster_id, + *share.operator_id, + share.share_pubkey.to_string(), + share.encrypted_private_key + ])?; + Ok(()) + } +} diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs new file mode 100644 index 00000000..fb182294 --- /dev/null +++ b/anchor/database/src/sql_operations.rs @@ -0,0 +1,140 @@ +use std::collections::HashMap; +use std::sync::LazyLock; + +// Wrappers around various SQL statements used for interacting with the db +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] +pub(crate) enum SqlStatement { + InsertOperator, // Insert a new Operator in the database + DeleteOperator, // Delete an Operator from the database + GetOperatorId, // Get the ID of this operator from its public key + GetAllOperators, // Get all of the Operators in the database + + InsertCluster, // Insert a new Cluster into the database + InsertClusterMember, // Insert a new Cluster Member into the database + UpdateClusterStatus, // Update the active status of the cluster + UpdateClusterFaulty, // Update the number of faulty Operators in the cluster + GetAllClusters, // Get all Clusters for state reconstruction + 
GetClusterMembers, // Get all Cluster Members for state reconstruction + + InsertValidator, // Insert a Validator into the database + DeleteValidator, // Delete a Validator from the database + GetAllValidators, // Get all Validators for state reconstruction + + InsertShare, // Insert a KeyShare into the database + GetShares, // Get the relevant keyshare for a validator + + UpdateFeeRecipient, // Update the fee recipient address for a cluster + SetGraffiti, // Update the Graffiti for a validator + + UpdateBlockNumber, // Update the last block that the database has processed + GetBlockNumber, // Get the last block that the database has processed + + GetAllNonces, // Fetch all the Nonce values for every Owner + BumpNonce, // Bump the nonce value for an Owner +} + +pub(crate) static SQL: LazyLock> = LazyLock::new(|| { + let mut m = HashMap::new(); + + // Operator + m.insert( + SqlStatement::InsertOperator, + "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::DeleteOperator, + "DELETE FROM operators WHERE operator_id = ?1", + ); + m.insert( + SqlStatement::GetOperatorId, + "SELECT operator_id FROM operators WHERE public_key = ?1", + ); + m.insert(SqlStatement::GetAllOperators, "SELECT * FROM operators"); + + // Cluster + m.insert( + SqlStatement::InsertCluster, + "INSERT OR IGNORE INTO clusters (cluster_id, owner, fee_recipient) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::InsertClusterMember, + "INSERT OR IGNORE INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::UpdateClusterStatus, + "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::UpdateClusterFaulty, + "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::GetAllClusters, + "SELECT DISTINCT + c.cluster_id, + c.owner, + c.fee_recipient, + c.faulty, + c.liquidated + FROM clusters c + JOIN cluster_members cm ON
c.cluster_id = cm.cluster_id", + ); + m.insert( + SqlStatement::GetClusterMembers, + "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1", + ); + + // Validator + m.insert( + SqlStatement::InsertValidator, + "INSERT INTO validators (validator_pubkey, cluster_id, validator_index, graffiti) VALUES (?1, ?2, ?3, ?4)", + ); + m.insert( + SqlStatement::DeleteValidator, + "DELETE from validators WHERE validator_pubkey = ?1", + ); + m.insert(SqlStatement::GetAllValidators, "SELECT * FROM validators"); + + // Shares + m.insert( + SqlStatement::InsertShare, + "INSERT INTO shares + (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) + VALUES + (?1, ?2, ?3, ?4, ?5)", + ); + m.insert( + SqlStatement::GetShares, + "SELECT share_pubkey, encrypted_key, operator_id, cluster_id, validator_pubkey FROM shares WHERE operator_id = ?1" + ); + + // Misc Datta + m.insert( + SqlStatement::UpdateFeeRecipient, + "UPDATE clusters SET fee_recipient = ?1 WHERE owner = ?2", + ); + m.insert( + SqlStatement::SetGraffiti, + "UPDATE validators SET graffiti = ?1 WHERE validator_pubkey = ?2", + ); + + // Blocks + m.insert( + SqlStatement::UpdateBlockNumber, + "UPDATE block SET block_number = ?1", + ); + m.insert( + SqlStatement::GetBlockNumber, + "SELECT block_number FROM block", + ); + + // Nonce + m.insert(SqlStatement::GetAllNonces, "SELECT * FROM nonce"); + m.insert( + SqlStatement::BumpNonce, + "INSERT INTO nonce (owner, nonce) VALUES (?1, 1) + ON CONFLICT (owner) DO UPDATE SET nonce = nonce + 1", + ); + + m +}); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs new file mode 100644 index 00000000..1bf491b7 --- /dev/null +++ b/anchor/database/src/state.rs @@ -0,0 +1,309 @@ +use crate::{ClusterMultiIndexMap, MetadataMultiIndexMap, MultiIndexMap, ShareMultiIndexMap}; +use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn}; +use crate::{MultiState, SingleState}; +use crate::{SqlStatement, SQL}; +use base64::prelude::*; +use 
dashmap::{DashMap, DashSet}; +use openssl::pkey::Public; +use openssl::rsa::Rsa; +use rusqlite::{params, OptionalExtension}; +use rusqlite::{types::Type, Error as SqlError}; +use ssv_types::{ + Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorMetadata, +}; +use std::collections::HashMap; +use std::str::FromStr; +use std::sync::atomic::{AtomicU64, Ordering}; +use types::Address; + +impl NetworkState { + /// Build the network state from the database data + pub(crate) fn new_with_state( + conn_pool: &Pool, + pubkey: &Rsa, + ) -> Result { + // Get database connection from the pool + let conn = conn_pool.get()?; + + // Get the last processed block from the database + let last_processed_block = Self::get_last_processed_block(&conn)?; + + // Without an ID, we have no idea who we are. Check to see if an operator with our public key + // is stored the database. If it does not exist, that means the operator still has to be registered + // with the network contract or that we have not seen the corresponding event yet + let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { + operator_id + } else { + // If it does not exist, just default the state since we do not know who we are + return Ok(Self { + multi_state: MultiState { + shares: MultiIndexMap::default(), + validator_metadata: MultiIndexMap::default(), + clusters: MultiIndexMap::default(), + }, + single_state: SingleState::default(), + }); + }; + + // First Phase: Fetch data from the database + // 1) OperatorId -> Operator + let operators = Self::fetch_operators(&conn)?; + // 2) ClusterId -> Cluster + let cluster_map = Self::fetch_clusters(&conn)?; + // 3) ClusterId -> Vec + let validator_map = Self::fetch_validators(&conn)?; + // 4) ClusterId -> Vec + let share_map = Self::fetch_shares(&conn, id)?; + // 5) Owner -> Nonce (u16) + let nonce_map = Self::fetch_nonces(&conn)?; + + // Second phase: Populate all in memory stores with data; + let shares_multi: ShareMultiIndexMap = 
MultiIndexMap::new(); + let metadata_multi: MetadataMultiIndexMap = MultiIndexMap::new(); + let cluster_multi: ClusterMultiIndexMap = MultiIndexMap::new(); + let single_state = SingleState { + id: AtomicU64::new(*id), + last_processed_block: AtomicU64::new(last_processed_block), + operators: DashMap::from_iter(operators), + clusters: DashSet::from_iter(cluster_map.keys().copied()), + nonces: DashMap::from_iter(nonce_map), + }; + + // Populate all multi-index maps in a single pass through clusters + for (cluster_id, cluster) in &cluster_map { + // Get all the validator for this cluster + let validators = validator_map + .get(cluster_id) + .expect("Validator for cluster must exist"); + + // Process each validator and its associated data + for validator in validators { + // Insert cluster and validator metadata + cluster_multi.insert( + cluster_id, + &validator.public_key, + &cluster.owner, + cluster.clone(), + ); + metadata_multi.insert( + &validator.public_key, + cluster_id, + &cluster.owner, + validator.clone(), + ); + + // Process this validators shares + if let Some(shares) = share_map.get(cluster_id) { + for share in shares { + if share.validator_pubkey == validator.public_key { + shares_multi.insert( + &validator.public_key, + cluster_id, + &cluster.owner, + share.clone(), + ); + } + } + } + } + } + + // Return fully constructed state + Ok(Self { + multi_state: MultiState { + shares: shares_multi, + validator_metadata: metadata_multi, + clusters: cluster_multi, + }, + single_state, + }) + } + + // Get the last block that was processed and saved to db + fn get_last_processed_block(conn: &PoolConn) -> Result { + conn.prepare_cached(SQL[&SqlStatement::GetBlockNumber])? 
+ .query_row(params![], |row| row.get(0)) + .map_err(DatabaseError::from) + } + + // Check to see if an operator with the public key already exists in the database + fn does_self_exist( + conn: &PoolConn, + pubkey: &Rsa, + ) -> Result, DatabaseError> { + let encoded = BASE64_STANDARD.encode( + pubkey + .public_key_to_pem() + .expect("Failed to encode RsaPublicKey"), + ); + let mut stmt = conn.prepare(SQL[&SqlStatement::GetOperatorId])?; + stmt.query_row(params![encoded], |row| Ok(OperatorId(row.get(0)?))) + .optional() + .map_err(DatabaseError::from) + } + + // Fetch and transform operator data from database + fn fetch_operators(conn: &PoolConn) -> Result, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllOperators])?; + let operators = stmt + .query_map([], |row| { + // Transform row into an operator and collect into HashMap + let operator: Operator = row.try_into()?; + Ok((operator.id, operator)) + })? + .map(|result| result.map_err(DatabaseError::from)); + operators.collect() + } + + // Fetch and transform validator data from the database + fn fetch_validators( + conn: &PoolConn, + ) -> Result>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllValidators])?; + let validators = stmt + .query_map([], |row| ValidatorMetadata::try_from(row))? 
+ .map(|result| result.map_err(DatabaseError::from)) + .collect::, _>>()?; + + let mut map = HashMap::new(); + for validator in validators { + map.entry(validator.cluster_id) + .or_insert_with(Vec::new) + .push(validator); + } + Ok(map) + } + + // Fetch and transform cluster data from the database + fn fetch_clusters(conn: &PoolConn) -> Result, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllClusters])?; + let clusters = stmt + .query_map([], |row| { + let cluster_id = ClusterId(row.get(0)?); + + // Get all of the members for this cluster + let cluster_members = Self::fetch_cluster_members(conn, cluster_id)?; + + // Convert row and members into cluster + let cluster = Cluster::try_from((row, cluster_members))?; + Ok((cluster_id, cluster)) + })? + .map(|result| result.map_err(DatabaseError::from)); + clusters.collect::, _>>() + } + + // Fetch members of a specific cluster + fn fetch_cluster_members( + conn: &PoolConn, + cluster_id: ClusterId, + ) -> Result, rusqlite::Error> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetClusterMembers])?; + let members = stmt.query_map([cluster_id.0], |row| { + Ok(ClusterMember { + operator_id: OperatorId(row.get(0)?), + cluster_id, + }) + })?; + + members.collect() + } + + // Fetch the shares for a specific operator + fn fetch_shares( + conn: &PoolConn, + id: OperatorId, + ) -> Result>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetShares])?; + let shares = stmt + .query_map([*id], |row| Share::try_from(row))? 
+ .map(|result| result.map_err(DatabaseError::from)) + .collect::, _>>()?; + + let mut map = HashMap::new(); + for share in shares { + map.entry(share.cluster_id) + .or_insert_with(Vec::new) + .push(share); + } + Ok(map) + } + + // Fetch all of the owner nonce pairs + fn fetch_nonces(conn: &PoolConn) -> Result, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllNonces])?; + let nonces = stmt + .query_map([], |row| { + // Get the owner from column 0 + let owner_str = row.get::<_, String>(0)?; + let owner = Address::from_str(&owner_str) + .map_err(|e| SqlError::FromSqlConversionFailure(1, Type::Text, Box::new(e)))?; + + // Get he nonce from column 1 + let nonce = row.get::<_, u16>(1)?; + Ok((owner, nonce)) + })? + .map(|result| result.map_err(DatabaseError::from)); + nonces.collect() + } +} + +// Interface over state data +impl NetworkDatabase { + /// Get a reference to the shares map + pub fn shares(&self) -> &ShareMultiIndexMap { + &self.state.multi_state.shares + } + + /// Get a reference to the validator metadata map + pub fn metadata(&self) -> &MetadataMultiIndexMap { + &self.state.multi_state.validator_metadata + } + + /// Get a reference to the cluster map + pub fn clusters(&self) -> &ClusterMultiIndexMap { + &self.state.multi_state.clusters + } + + /// Get the ID of our Operator if it exists + pub fn get_own_id(&self) -> Option { + let id = self.state.single_state.id.load(Ordering::Relaxed); + if id == u64::MAX { + None + } else { + Some(OperatorId(id)) + } + } + + /// Get operator data from in-memory store + pub fn get_operator(&self, id: &OperatorId) -> Option { + self.state.single_state.operators.get(id).map(|v| v.clone()) + } + + /// Check if an operator exists + pub fn operator_exists(&self, id: &OperatorId) -> bool { + self.state.single_state.operators.contains_key(id) + } + + /// Check if we are a member of a specific cluster + pub fn member_of_cluster(&self, id: &ClusterId) -> bool { + self.state.single_state.clusters.contains(id) 
+ } + + /// Get the last block that has been fully processed by the database + pub fn get_last_processed_block(&self) -> u64 { + self.state + .single_state + .last_processed_block + .load(Ordering::Relaxed) + } + + /// Get the nonce of the owner if it exists + pub fn get_nonce(&self, owner: &Address) -> u16 { + self.state + .single_state + .nonces + .get(owner) + .map(|v| *v) + .unwrap_or(0) + } +} diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql new file mode 100644 index 00000000..ae682429 --- /dev/null +++ b/anchor/database/src/table_schema.sql @@ -0,0 +1,63 @@ +CREATE TABLE block ( + block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0) +); +INSERT INTO block (block_number) VALUES (0); + +CREATE TABLE nonce ( + owner TEXT NOT NULL PRIMARY KEY, + nonce INTEGER DEFAULT 0 +); + +CREATE TABLE operators ( + operator_id INTEGER PRIMARY KEY, + public_key TEXT NOT NULL, + owner_address TEXT NOT NULL, + UNIQUE (public_key) +); + +CREATE TABLE clusters ( + cluster_id BLOB PRIMARY KEY, + owner TEXT NOT NULL, + fee_recipient TEXT NOT NULL, + faulty INTEGER DEFAULT 0, + liquidated BOOLEAN DEFAULT FALSE +); + +CREATE TABLE cluster_members ( + cluster_id BLOB NOT NULL, + operator_id INTEGER NOT NULL, + PRIMARY KEY (cluster_id, operator_id), + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE, + FOREIGN KEY (operator_id) REFERENCES operators(operator_id) ON DELETE CASCADE +); + +CREATE TABLE validators ( + validator_pubkey TEXT PRIMARY KEY, + cluster_id BLOB NOT NULL, + validator_index INTEGER DEFAULT 0, + graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) +); + +CREATE TABLE shares ( + validator_pubkey TEXT NOT NULL, + cluster_id BLOB NOT NULL, + operator_id INTEGER NOT NULL, + share_pubkey TEXT, + encrypted_key BLOB, + PRIMARY KEY (validator_pubkey, operator_id), + FOREIGN KEY (cluster_id, operator_id) 
REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, + FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE +); + +-- Add trigger to clean up empty clusters +CREATE TRIGGER delete_empty_clusters +AFTER DELETE ON validators +WHEN NOT EXISTS ( + SELECT 1 FROM validators + WHERE cluster_id = OLD.cluster_id +) +BEGIN + DELETE FROM clusters WHERE cluster_id = OLD.cluster_id; +END; + diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs new file mode 100644 index 00000000..33c1755a --- /dev/null +++ b/anchor/database/src/tests/cluster_tests.rs @@ -0,0 +1,102 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod cluster_database_tests { + use super::*; + + #[test] + // Test inserting a cluster into the database + fn test_insert_retrieve_cluster() { + let fixture = TestFixture::new(); + assertions::cluster::exists_in_db(&fixture.db, &fixture.cluster); + assertions::cluster::exists_in_memory(&fixture.db, &fixture.cluster); + assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); + assertions::validator::exists_in_db(&fixture.db, &fixture.validator); + assertions::share::exists_in_db( + &fixture.db, + &fixture.validator.public_key, + &fixture.shares, + ); + } + + #[test] + // Test deleting the last validator from a cluster and make sure the metadata, + // cluster, cluster members, and shares are all cleaned up + fn test_delete_last_validator() { + let fixture = TestFixture::new(); + let pubkey = fixture.validator.public_key.clone(); + assert!(fixture.db.delete_validator(&pubkey).is_ok()); + + // Since there was only one validator in the cluster, everything should be removed + assertions::cluster::exists_not_in_db(&fixture.db, fixture.cluster.cluster_id); + assertions::cluster::exists_not_in_memory(&fixture.db, fixture.cluster.cluster_id); + assertions::validator::exists_not_in_db(&fixture.db, &fixture.validator); + 
assertions::validator::exists_not_in_memory(&fixture.db, &fixture.validator); + assertions::share::exists_not_in_db(&fixture.db, &pubkey); + assertions::share::exists_not_in_memory(&fixture.db, &pubkey); + } + + #[test] + // Test updating the fee recipient + fn test_update_fee_recipient() { + let fixture = TestFixture::new(); + let mut cluster = fixture.cluster; + let new_fee_recipient = Address::random(); + + // Update fee recipient + assert!(fixture + .db + .update_fee_recipient(cluster.owner, new_fee_recipient) + .is_ok()); + + //assertions will compare the data + cluster.fee_recipient = new_fee_recipient; + assertions::cluster::exists_in_db(&fixture.db, &cluster); + assertions::cluster::exists_in_memory(&fixture.db, &cluster); + } + + #[test] + // Try inserting a cluster that does not already have registers operators in the database + fn test_insert_cluster_without_operators() { + let fixture = TestFixture::new_empty(); + let cluster = generators::cluster::random(4); + let metadata = generators::validator::random_metadata(cluster.cluster_id); + let shares = vec![generators::share::random( + cluster.cluster_id, + OperatorId(1), + &fixture.validator.public_key, + )]; + fixture + .db + .insert_validator(cluster, metadata, shares) + .expect_err("Insertion should fail"); + } + + #[test] + // Test updating the operational status of the cluster + fn test_update_cluster_status() { + let fixture = TestFixture::new(); + let mut cluster = fixture.cluster; + + // Test updating to liquidated + fixture + .db + .update_status(cluster.cluster_id, true) + .expect("Failed to update cluster status"); + + // verify in memory and db + cluster.liquidated = true; + assertions::cluster::exists_in_db(&fixture.db, &cluster); + assertions::cluster::exists_in_memory(&fixture.db, &cluster); + } + + #[test] + // Test inserting a cluster that already exists + fn test_duplicate_cluster_insert() { + let fixture = TestFixture::new(); + fixture + .db + .insert_validator(fixture.cluster, 
fixture.validator, fixture.shares) + .expect_err("Expected failure when inserting cluster that already exists"); + } +} diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs new file mode 100644 index 00000000..f8f29a98 --- /dev/null +++ b/anchor/database/src/tests/mod.rs @@ -0,0 +1,28 @@ +mod cluster_tests; +mod operator_tests; +mod state_tests; +mod utils; +mod validator_tests; + +pub mod test_prelude { + pub use super::utils::*; + pub use crate::multi_index::UniqueIndex; + pub use crate::NetworkDatabase; + pub use ssv_types::*; + pub use tempfile::tempdir; + pub use types::{Address, Graffiti, PublicKey}; +} + +#[cfg(test)] +mod database_test { + use super::test_prelude::*; + + #[test] + fn test_create_database() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let pubkey = generators::pubkey::random_rsa(); + let db = NetworkDatabase::new(&file, &pubkey); + assert!(db.is_ok()); + } +} diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs new file mode 100644 index 00000000..68179674 --- /dev/null +++ b/anchor/database/src/tests/operator_tests.rs @@ -0,0 +1,99 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod operator_database_tests { + use super::*; + + #[test] + // Test to make sure we can insert new operators into the database and they are present in the + // state stores + fn test_insert_retrieve_operator() { + // Create a new text fixture with empty db + let fixture = TestFixture::new_empty(); + + // Generate a new operator and insert it + let operator = generators::operator::with_id(1); + fixture + .db + .insert_operator(&operator) + .expect("Failed to insert operator"); + + // Confirm that it exists both in the db and the state store + assertions::operator::exists_in_db(&fixture.db, &operator); + assertions::operator::exists_in_memory(&fixture.db, &operator); + } + + #[test] + // Ensure that we cannot insert a duplicate operator into the database + 
fn test_duplicate_insert() { + // Create a new test fixture with empty db + let fixture = TestFixture::new_empty(); + + // Generate a new operator and insert it + let operator = generators::operator::with_id(1); + fixture + .db + .insert_operator(&operator) + .expect("Failed to insert operator"); + + // Try to insert it again, this should fail + assert!(fixture.db.insert_operator(&operator).is_err()); + } + + #[test] + // Test deleting an operator and confirming it is gone from the db and in memory + fn test_insert_delete_operator() { + // Create new test fixture with empty db + let fixture = TestFixture::new_empty(); + + // Generate a new operator and insert it + let operator = generators::operator::with_id(1); + fixture + .db + .insert_operator(&operator) + .expect("Failed to insert operator"); + + // Now, delete the operator + fixture + .db + .delete_operator(operator.id) + .expect("Failed to delete operator"); + + // Confirm that it is gone + assertions::operator::exists_not_in_memory(&fixture.db, operator.id); + assertions::operator::exists_not_in_db(&fixture.db, operator.id); + } + + #[test] + // Test inserting multiple operators + fn test_insert_multiple_operators() { + // Create new test fixture with empty db + let fixture = TestFixture::new_empty(); + + // Generate and insert operators + let operators: Vec = (0..4).map(generators::operator::with_id).collect(); + for operator in &operators { + fixture + .db + .insert_operator(operator) + .expect("Failed to insert operator"); + } + + // Delete them all and confirm deletion + for operator in operators { + fixture + .db + .delete_operator(operator.id) + .expect("Failed to delete operator"); + assertions::operator::exists_not_in_memory(&fixture.db, operator.id); + assertions::operator::exists_not_in_db(&fixture.db, operator.id); + } + } + + #[test] + /// Try to delete an operator that does not exist + fn test_delete_dne_operator() { + let fixture = TestFixture::new_empty(); + 
assert!(fixture.db.delete_operator(OperatorId(1)).is_err()) + } +} diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs new file mode 100644 index 00000000..9e3b6a96 --- /dev/null +++ b/anchor/database/src/tests/state_tests.rs @@ -0,0 +1,158 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod state_database_tests { + use super::*; + + #[test] + // Test that the previously inserted operators are present after restart + fn test_operator_store() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + + // drop the database and then recreate it + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // confirm that all of the operators exist + for operator in &fixture.operators { + assertions::operator::exists_in_db(&fixture.db, operator); + assertions::operator::exists_in_memory(&fixture.db, operator); + } + } + + #[test] + // Test that the proper cluster data is present after restart + fn test_cluster_after_restart() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + let cluster = fixture.cluster; + + // drop the database and then recreate it + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // confirm all data is what we expect + assertions::cluster::exists_in_memory(&fixture.db, &cluster); + assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); + } + + #[test] + // Test that a this operator owns is in memory after restart + fn test_shares_after_restart() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + + // drop and recrate database + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // Confim share data, there should be one share in memory for this 
operator + assert!(fixture.db.shares().length() == 1); + let pk = &fixture.validator.public_key; + let share = fixture + .db + .shares() + .get_by(pk) + .expect("The share should exist"); + assertions::share::exists_in_memory(&fixture.db, pk, &share); + } + + #[test] + // Test that we have multi validators in memory after restart + fn test_multiple_entries() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + + // Generate new validator information + let cluster = fixture.cluster; + let new_validator = generators::validator::random_metadata(cluster.cluster_id); + let mut shares: Vec = Vec::new(); + fixture.operators.iter().for_each(|op| { + let share = + generators::share::random(cluster.cluster_id, op.id, &new_validator.public_key); + shares.push(share); + }); + fixture + .db + .insert_validator(cluster, new_validator, shares) + .expect("Insert should not fail"); + + // drop and recrate database + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // assert that there are two validators, one cluster, and 2 shares in memory + assert!(fixture.db.metadata().length() == 2); + assert!(fixture.db.shares().length() == 2); + assert!(fixture.db.clusters().length() == 1); + } + + #[test] + // Test that you can update and retrieve a block number + fn test_block_number() { + let fixture = TestFixture::new(); + assert_eq!(fixture.db.get_last_processed_block(), 0); + fixture + .db + .processed_block(10) + .expect("Failed to update the block number"); + assert_eq!(fixture.db.get_last_processed_block(), 10); + } + + #[test] + // Test to make sure the block number is loaded in after restart + fn test_block_number_after_restart() { + let mut fixture = TestFixture::new(); + fixture + .db + .processed_block(10) + .expect("Failed to update the block number"); + drop(fixture.db); + + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to 
create database"); + assert_eq!(fixture.db.get_last_processed_block(), 10); + } + + #[test] + // Test to make sure we can retrieve and increment a nonce + fn test_retrieve_increment_nonce() { + let fixture = TestFixture::new(); + let owner = Address::random(); + + // this is the first time getting the nonce, so it should be zero + let nonce = fixture.db.get_nonce(&owner); + assert_eq!(nonce, 0); + + // increment the nonce and then confirm that is is one + fixture + .db + .bump_nonce(&owner) + .expect("Failed in increment nonce"); + let nonce = fixture.db.get_nonce(&owner); + assert_eq!(nonce, 1); + } + + #[test] + // Test to make sure a nonce persists after a restart + fn test_nonce_after_restart() { + let mut fixture = TestFixture::new(); + let owner = Address::random(); + fixture + .db + .bump_nonce(&owner) + .expect("Failed in increment nonce"); + + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // confirm that nonce is 1 + assert_eq!(fixture.db.get_nonce(&owner), 1); + } +} diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs new file mode 100644 index 00000000..a822d9f0 --- /dev/null +++ b/anchor/database/src/tests/utils.rs @@ -0,0 +1,504 @@ +use super::test_prelude::*; +use openssl::pkey::Public; +use openssl::rsa::Rsa; +use rand::Rng; +use rusqlite::params; +use std::path::PathBuf; +use tempfile::TempDir; +use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + +const DEFAULT_NUM_OPERATORS: u64 = 4; +const RSA_KEY_SIZE: u32 = 2048; +const DEFAULT_SEED: [u8; 16] = [42; 16]; + +// Test fixture for common scnearios +#[derive(Debug)] +pub struct TestFixture { + pub db: NetworkDatabase, + pub cluster: Cluster, + pub validator: ValidatorMetadata, + pub shares: Vec, + pub operators: Vec, + pub path: PathBuf, + pub pubkey: Rsa, + _temp_dir: TempDir, +} + +impl TestFixture { + // Generate a database that is populated with a full cluster. 
This operator is a part of the + // cluster, so membership data should be saved + pub fn new() -> Self { + // generate the operators and pick the first one to be us + let operators: Vec = (0..DEFAULT_NUM_OPERATORS) + .map(generators::operator::with_id) + .collect(); + let us = operators + .first() + .expect("Failed to get operator") + .rsa_pubkey + .clone(); + + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let db_path = temp_dir.path().join("test.db"); + let db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB"); + + // Insert all of the operators + operators.iter().for_each(|op| { + db.insert_operator(op).expect("Failed to insert operator"); + }); + + // Build a cluster with all of the operators previously inserted + let cluster = generators::cluster::with_operators(&operators); + + // Generate one validator that will delegate to this cluster + let validator = generators::validator::random_metadata(cluster.cluster_id); + + // Generate shares for the validator. 
Each operator will have one share + let shares: Vec = operators + .iter() + .map(|op| generators::share::random(cluster.cluster_id, op.id, &validator.public_key)) + .collect(); + + db.insert_validator(cluster.clone(), validator.clone(), shares.clone()) + .expect("Failed to insert cluster"); + + // End state: + // There are DEFAULT_NUM_OPERATORS operators in the network + // There is a single cluster with a single validator + // The operators acting on behalf of the validator are all of the operators in the network + // Each operator has a piece of the keyshare for the validator + + Self { + db, + cluster, + operators, + validator, + shares, + path: db_path, + pubkey: us, + _temp_dir: temp_dir, + } + } + + // Generate an empty database and pick a random public key to be us + pub fn new_empty() -> Self { + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let db_path = temp_dir.path().join("test.db"); + let pubkey = generators::pubkey::random_rsa(); + + let db = NetworkDatabase::new(&db_path, &pubkey).expect("Failed to create test database"); + let cluster = generators::cluster::random(0); + + Self { + db, + validator: generators::validator::random_metadata(cluster.cluster_id), + cluster, + operators: Vec::new(), + shares: Vec::new(), + path: db_path, + pubkey, + _temp_dir: temp_dir, + } + } +} + +// Generator functions for test data +pub mod generators { + use super::*; + + // Generate a random operator. 
Either with a specific id or a specific public key + pub mod operator { + use super::*; + + pub fn with_id(id: u64) -> Operator { + let public_key = generators::pubkey::random_rsa(); + Operator::new_with_pubkey(public_key, OperatorId(id), Address::random()) + } + } + + pub mod cluster { + use super::*; + + // Generate a random cluster with a specific number of operators + pub fn random(num_operators: u64) -> Cluster { + let cluster_id: [u8; 32] = rand::thread_rng().gen(); + let cluster_id = ClusterId(cluster_id); + let members = (0..num_operators).map(OperatorId).collect(); + let owner_recipient = Address::random(); + + Cluster { + cluster_id, + owner: owner_recipient, + fee_recipient: owner_recipient, + faulty: 0, + liquidated: false, + cluster_members: members, + } + } + + // Generate a cluster with a specific set of operators + pub fn with_operators(operators: &[Operator]) -> Cluster { + let cluster_id: [u8; 32] = rand::thread_rng().gen(); + let cluster_id = ClusterId(cluster_id); + let members = operators.iter().map(|op| op.id).collect(); + let owner_recipient = Address::random(); + + Cluster { + cluster_id, + owner: owner_recipient, + fee_recipient: owner_recipient, + faulty: 0, + liquidated: false, + cluster_members: members, + } + } + } + + pub mod share { + use super::*; + // Generate a random keyshare + pub fn random(cluster_id: ClusterId, operator_id: OperatorId, pk: &PublicKey) -> Share { + Share { + validator_pubkey: pk.clone(), + operator_id, + cluster_id, + share_pubkey: pubkey::random(), + encrypted_private_key: [0u8; 256], + } + } + } + + pub mod pubkey { + use super::*; + + // Generate a random RSA public key for operators + pub fn random_rsa() -> Rsa { + let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key"); + priv_key + .public_key_to_pem() + .and_then(|pem| Rsa::public_key_from_pem(&pem)) + .expect("Failed to process RSA key") + } + + // Generate a random public key for validators + pub fn random() -> PublicKey { + let 
rng = &mut XorShiftRng::from_seed(DEFAULT_SEED); + PublicKey::random_for_test(rng) + } + } + + pub mod validator { + use super::*; + + // Generate random ValidatorMetdata + // assumes fee_recipient = owner. + pub fn random_metadata(cluster_id: ClusterId) -> ValidatorMetadata { + ValidatorMetadata { + public_key: pubkey::random(), + cluster_id, + index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), + graffiti: Graffiti::default(), + } + } + } +} + +// Database queries for testing +// This will extract information corresponding to the original tables +pub mod queries { + use super::*; + use std::str::FromStr; + + // Single selection query statements + const GET_OPERATOR: &str = + "SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1"; + const GET_CLUSTER: &str = "SELECT cluster_id, owner, fee_recipient, faulty, liquidated FROM clusters WHERE cluster_id = ?1"; + const GET_SHARES: &str = "SELECT share_pubkey, encrypted_key, cluster_id, operator_id FROM shares WHERE validator_pubkey = ?1"; + const GET_VALIDATOR: &str = "SELECT validator_pubkey, cluster_id, validator_index, graffiti FROM validators WHERE validator_pubkey = ?1"; + const GET_MEMBERS: &str = "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1"; + + // Get an operator from the database + pub fn get_operator(db: &NetworkDatabase, id: OperatorId) -> Option { + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare(GET_OPERATOR) + .expect("Failed to prepare statement"); + + stmt.query_row(params![*id], |row| { + let operator = Operator::try_from(row).expect("Failed to create operator"); + Ok(operator) + }) + .ok() + } + + // Get a Cluster from the database + pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option { + let members = get_cluster_members(db, id)?; + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare(GET_CLUSTER) + .expect("Failed to prepare statement"); + + stmt.query_row(params![*id], |row| { + let 
cluster = Cluster::try_from((row, members))?; + Ok(cluster) + }) + .ok() + } + + // Get a share from the database + pub fn get_shares(db: &NetworkDatabase, pubkey: &PublicKey) -> Option> { + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare(GET_SHARES) + .expect("Failed to prepare statement"); + let shares: Result, _> = stmt + .query_map(params![pubkey.to_string()], |row| { + let share_pubkey_str = row.get::<_, String>(0)?; + let share_pubkey = PublicKey::from_str(&share_pubkey_str).unwrap(); + let encrypted_private_key: [u8; 256] = row.get(1)?; + + // Get the OperatorId from column 6 and ClusterId from column 1 + let cluster_id = ClusterId(row.get(2)?); + let operator_id = OperatorId(row.get(3)?); + + Ok(Share { + validator_pubkey: pubkey.clone(), + operator_id, + cluster_id, + share_pubkey, + encrypted_private_key, + }) + }) + .ok()? + .collect(); + match shares { + Ok(vec) if !vec.is_empty() => Some(vec), + _ => None, + } + } + + // Get a ClusterMember from the database + fn get_cluster_members( + db: &NetworkDatabase, + cluster_id: ClusterId, + ) -> Option> { + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare(GET_MEMBERS) + .expect("Failed to prepare statement"); + let members: Result, _> = stmt + .query_map([cluster_id.0], |row| { + Ok(ClusterMember { + operator_id: OperatorId(row.get(0)?), + cluster_id, + }) + }) + .ok()? 
+ .collect(); + match members { + Ok(vec) if !vec.is_empty() => Some(vec), + _ => None, + } + } + + // Get ValidatorMetadata from the database + pub fn get_validator( + db: &NetworkDatabase, + validator_pubkey: &str, + ) -> Option { + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare(GET_VALIDATOR) + .expect("Failed to prepare statement"); + + stmt.query_row(params![validator_pubkey], |row| { + let validator = ValidatorMetadata::try_from(row)?; + Ok(validator) + }) + .ok() + } +} + +/// Database assertions for testing +pub mod assertions { + + use super::*; + + // Assertions on operator information fetches from in memory and the database + pub mod operator { + use super::*; + + // Asserts data between the two operators is the same + fn data(op1: &Operator, op2: &Operator) { + // Verify all fields match + assert_eq!(op1.id, op2.id, "Operator ID mismatch"); + assert_eq!( + op1.rsa_pubkey.public_key_to_pem().unwrap(), + op2.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch" + ); + assert_eq!(op1.owner, op2.owner, "Operator owner mismatch"); + } + + // Verifies that the operator is in memory + pub fn exists_in_memory(db: &NetworkDatabase, operator: &Operator) { + let stored_operator = db + .get_operator(&operator.id) + .expect("Operator should exist"); + data(operator, &stored_operator); + } + + // Verifies that the operator is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, operator: OperatorId) { + assert!(!db.operator_exists(&operator)); + } + + // Verify that the operator is in the database + pub fn exists_in_db(db: &NetworkDatabase, operator: &Operator) { + let db_operator = + queries::get_operator(db, operator.id).expect("Operator not found in database"); + data(operator, &db_operator); + } + + // Verify that the operator does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, operator_id: OperatorId) { + // Check database + assert!( + queries::get_operator(db, 
operator_id).is_none(), + "Operator still exists in database" + ); + } + } + + // All validator related assertions + pub mod validator { + use super::*; + + fn data(v1: &ValidatorMetadata, v2: &ValidatorMetadata) { + assert_eq!(v1.cluster_id, v2.cluster_id); + assert_eq!(v1.graffiti, v2.graffiti); + assert_eq!(v1.index, v2.index); + assert_eq!(v1.public_key, v2.public_key); + } + // Verifies that the cluster is in memory + pub fn exists_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { + let stored_validator = db + .metadata() + .get_by(&v.public_key) + .expect("Metadata should exist"); + data(v, &stored_validator); + } + + // Verifies that the cluster is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { + let stored_validator = db.metadata().get_by(&v.public_key); + assert!(stored_validator.is_none()); + } + + // Verify that the cluster is in the database + pub fn exists_in_db(db: &NetworkDatabase, v: &ValidatorMetadata) { + let db_validator = queries::get_validator(db, &v.public_key.to_string()) + .expect("Validator should exist"); + data(v, &db_validator); + } + + // Verify that the cluster does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, v: &ValidatorMetadata) { + let db_validator = queries::get_validator(db, &v.public_key.to_string()); + assert!(db_validator.is_none()); + } + } + + // Cluster assetions + pub mod cluster { + use super::*; + fn data(c1: &Cluster, c2: &Cluster) { + assert_eq!(c1.cluster_id, c2.cluster_id); + assert_eq!(c1.owner, c2.owner); + assert_eq!(c1.fee_recipient, c2.fee_recipient); + assert_eq!(c1.faulty, c2.faulty); + assert_eq!(c1.liquidated, c2.liquidated); + assert_eq!(c1.cluster_members, c2.cluster_members); + } + // Verifies that the cluster is in memory + pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) { + assert!(db.member_of_cluster(&c.cluster_id)); + let stored_cluster = db + .clusters() + .get_by(&c.cluster_id) + .expect("Cluster should 
exist"); + data(c, &stored_cluster) + } + + // Verifies that the cluster is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) { + assert!(!db.member_of_cluster(&cluster_id)); + let stored_cluster = db.clusters().get_by(&cluster_id); + assert!(stored_cluster.is_none()); + } + + // Verify that the cluster is in the database + pub fn exists_in_db(db: &NetworkDatabase, c: &Cluster) { + let db_cluster = + queries::get_cluster(db, c.cluster_id).expect("Cluster not found in database"); + data(c, &db_cluster); + } + + // Verify that the cluster does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, cluster_id: ClusterId) { + // Check database + assert!( + queries::get_cluster(db, cluster_id).is_none(), + "Cluster exists in database" + ); + } + } + + // + pub mod share { + use super::*; + fn data(s1: &Share, s2: &Share) { + assert_eq!(s1.cluster_id, s2.cluster_id); + assert_eq!(s1.encrypted_private_key, s2.encrypted_private_key); + assert_eq!(s1.operator_id, s2.operator_id); + assert_eq!(s1.share_pubkey, s2.share_pubkey); + } + + // Verifies that a share is in memory + pub fn exists_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &Share) { + let stored_share = db + .shares() + .get_by(validator_pubkey) + .expect("Share should exist"); + data(s, &stored_share); + } + + // Verifies that a share is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { + let stored_share = db.shares().get_by(validator_pubkey); + assert!(stored_share.is_none()); + } + + // Verifies that all of the shares for a validator are in the database + pub fn exists_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &[Share]) { + let db_shares = + queries::get_shares(db, validator_pubkey).expect("Shares should exist in db"); + // have to pair them up since we dont know what order they will be returned from db in + db_shares + .iter() + .flat_map(|share| { + s.iter() + 
.filter(|share2| share.operator_id == share2.operator_id) + .map(move |share2| (share, share2)) + }) + .for_each(|(share, share2)| data(share, share2)); + } + + // Verifies that all of the shares for a validator are not in the database + pub fn exists_not_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey) { + let shares = queries::get_shares(db, validator_pubkey); + assert!(shares.is_none()); + } + } +} diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs new file mode 100644 index 00000000..7cda1b9a --- /dev/null +++ b/anchor/database/src/tests/validator_tests.rs @@ -0,0 +1,26 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod validator_database_tests { + use super::*; + + #[test] + /// Test updating the graffiti of a validator + fn test_update_graffiti() { + let fixture = TestFixture::new(); + let new_graffiti = Graffiti::default(); + let mut validator = fixture.validator; + + // update the graffiti + assert!(fixture + .db + .update_graffiti(&validator.public_key, new_graffiti) + .is_ok()); + + // confirm that it has changed both in the db and memory + // exists call will also check data values + validator.graffiti = new_graffiti; + assertions::validator::exists_in_db(&fixture.db, &validator); + assertions::validator::exists_in_memory(&fixture.db, &validator); + } +} diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs new file mode 100644 index 00000000..ee4a8930 --- /dev/null +++ b/anchor/database/src/validator_operations.rs @@ -0,0 +1,53 @@ +use crate::{multi_index::UniqueIndex, DatabaseError, NetworkDatabase, SqlStatement, SQL}; +use rusqlite::params; +use types::{Address, Graffiti, PublicKey}; + +/// Implements all validator specific database functionality +impl NetworkDatabase { + /// Update the fee recipient address for all validators in a cluster + pub fn update_fee_recipient( + &self, + owner: Address, + fee_recipient: Address, + ) -> Result<(), 
DatabaseError> { + // Make sure the cluster exists by getting the in memory entry + if let Some(mut cluster) = self.clusters().get_by(&owner) { + // Update the database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])? + .execute(params![ + fee_recipient.to_string(), // New fee recipient address for entire cluster + owner.to_string() // Owner of the cluster + ])?; + + // Update in memory + cluster.fee_recipient = fee_recipient; + self.clusters() + .update(&cluster.cluster_id, cluster.to_owned()); + } + Ok(()) + } + + /// Update the Graffiti for a Validator + pub fn update_graffiti( + &self, + validator_pubkey: &PublicKey, + graffiti: Graffiti, + ) -> Result<(), DatabaseError> { + // Make sure this validator exists by getting the in memory entry + if let Some(mut validator) = self.metadata().get_by(validator_pubkey) { + // Update the database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::SetGraffiti])? + .execute(params![ + graffiti.0.as_slice(), // New graffiti + validator_pubkey.to_string() // The public key of the validator + ])?; + + // Update in memory + validator.graffiti = graffiti; + self.metadata().update(validator_pubkey, validator); + } + Ok(()) + } +}