From 1f5860751acf850e8a1bf65b35a972211baa337f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 5 Dec 2024 14:28:46 +0000 Subject: [PATCH 01/50] new types --- Cargo.toml | 8 +++- anchor/common/ssv_types/Cargo.toml | 11 +++++ anchor/common/ssv_types/src/cluster.rs | 30 ++++++++++++++ anchor/common/ssv_types/src/lib.rs | 7 ++++ anchor/common/ssv_types/src/operator.rs | 54 +++++++++++++++++++++++++ anchor/common/ssv_types/src/share.rs | 30 ++++++++++++++ anchor/common/ssv_types/src/util.rs | 29 +++++++++++++ 7 files changed, 168 insertions(+), 1 deletion(-) create mode 100644 anchor/common/ssv_types/Cargo.toml create mode 100644 anchor/common/ssv_types/src/cluster.rs create mode 100644 anchor/common/ssv_types/src/lib.rs create mode 100644 anchor/common/ssv_types/src/operator.rs create mode 100644 anchor/common/ssv_types/src/share.rs create mode 100644 anchor/common/ssv_types/src/util.rs diff --git a/Cargo.toml b/Cargo.toml index bbdc34b7..19c2f647 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,9 @@ members = [ "anchor/http_metrics", "anchor/qbft", "anchor/network", - "anchor/common/version" + "anchor/database", + "anchor/common/version", + "anchor/common/ssv_types" ] resolver = "2" @@ -21,6 +23,7 @@ http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } network = { path ="anchor/network"} version = { path ="anchor/common/version"} +ssv_types = { path = "anchor/common/ssv_types" } lighthouse_network = { git = "https://github.com/sigp/lighthouse", branch = "unstable"} task_executor = { git = "https://github.com/sigp/lighthouse", branch = "unstable", default-features = false, features = [ "tracing", ] } metrics = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } @@ -28,6 +31,7 @@ validator_metrics = { git = "https://github.com/agemanning/lighthouse", branch = sensitive_url = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } slot_clock = { git = "https://github.com/agemanning/lighthouse", branch = "modularize-vc" } unused_port = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } +types = { git = "https://github.com/sigp/lighthouse", branch = "unstable" } derive_more = { version = "1.0.0", features = ["full"] } async-channel = "1.9" axum = "0.7.7" @@ -50,6 +54,8 @@ tokio = { version = "1.39.2", features = [ ] } tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } +rsa = { version = "0.9.7", features = ["pem"] } +base64 = "0.22.1" [profile.maxperf] inherits = "release" diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml new file mode 100644 index 00000000..c1c35dc5 --- /dev/null +++ b/anchor/common/ssv_types/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "ssv_types" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +types = { workspace = true} +rsa = { workspace = true } +derive_more = { workspace = true } +base64 = { workspace = true } diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs new file mode 100644 index 00000000..77d8f3ec --- /dev/null +++ b/anchor/common/ssv_types/src/cluster.rs @@ -0,0 +1,30 @@ +use crate::OperatorId; +use crate::Share; +use derive_more::{Deref, From}; + +/// Unique identifier for a cluster +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] +pub struct ClusterId(pub u64); + +/// A Cluster is a group of Operators that are acting on behalf of a Validator +pub struct Cluster 
{ + /// Unique identifier for a Cluster + pub cluster_id: ClusterId, + /// All of the members of this Cluster + pub cluster_members: Vec, + /// The number of faulty operator in the Cluster + pub faulty: u64, + /// If the Cluster is liquidated or active + pub liquidated: bool, +} + +/// A member of a Cluster. This is just an Operator that holds onto a share of the Validator key +#[derive(Debug, Clone)] +pub struct ClusterMember { + /// Unique identifier for the Operator this member represents + pub operator_id: OperatorId, + /// Unique identifier for the Cluster this member is a part of + pub cluster_id: ClusterId, + /// The Share this member is responsible for + pub share: Share, +} diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs new file mode 100644 index 00000000..271ecb1a --- /dev/null +++ b/anchor/common/ssv_types/src/lib.rs @@ -0,0 +1,7 @@ +pub use cluster::{Cluster, ClusterId, ClusterMember}; +pub use operator::{Operator, OperatorId}; +pub use share::{Share, ValidatorMetadata, ValidatorIndex}; +mod cluster; +mod operator; +mod share; +mod util; diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs new file mode 100644 index 00000000..70b96296 --- /dev/null +++ b/anchor/common/ssv_types/src/operator.rs @@ -0,0 +1,54 @@ +use crate::util::parse_rsa; +use derive_more::{Deref, From}; +use rsa::RsaPublicKey; +use std::cmp::Eq; +use std::fmt::Debug; +use std::hash::Hash; + +/// Unique identifier for an Operator. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] +pub struct OperatorId(u64); + +/// Client responsible for maintaining the overall health of the network. +#[derive(Debug, Clone)] +pub struct Operator { + /// ID to uniquely identify this operator + pub id: OperatorId, + /// Base-64 encoded PEM RSA public key + pub public_key: RsaPublicKey, +} + +impl Operator { + /// Creates a new operator from its OperatorId and PEM-encoded public key string + pub fn new(pem_data: &str, operator_id: OperatorId) -> Result { + let rsa_pubkey = parse_rsa(pem_data)?; + Ok(Self::new_with_pubkey(rsa_pubkey, operator_id)) + } + + // Creates a new operator from an existing RSA public key and OperatorId + pub fn new_with_pubkey(rsa_pubkey: RsaPublicKey, operator_id: OperatorId) -> Self { + Self { + id: operator_id, + public_key: rsa_pubkey, + } + } +} + +#[cfg(test)] +mod operator_tests { + use super::*; + + #[test] + fn operator_from_pubkey_and_id() { + // Random valid operator public key and id: https://explorer.ssv.network/operators/1141 + let pem_data = "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBbFFmQVIzMEd4bFpacEwrNDByU0IKTEpSYlkwY2laZDBVMXhtTlp1bFB0NzZKQXJ5d2lia0Y4SFlQV2xkM3dERVdWZXZjRzRGVVBSZ0hDM1MrTHNuMwpVVC9TS280eE9nNFlnZ0xqbVVXQysyU3ZGRFhXYVFvdFRXYW5UU0drSEllNGFnTVNEYlUzOWhSMWdOSTJhY2NNCkVCcjU2eXpWcFMvKytkSk5xU002S1FQM3RnTU5ia2IvbEtlY0piTXM0ZWNRMTNkWUQwY3dFNFQxcEdTYUdhcEkKbFNaZ2lYd0cwSGFNTm5GUkt0OFlkZjNHaTFMRlh3Zlo5NHZFRjJMLzg3RCtidjdkSFVpSGRjRnh0Vm0rVjVvawo3VFptcnpVdXB2NWhKZ3lDVE9zc0xHOW1QSGNORnhEVDJ4NUJKZ2FFOVpJYnMrWVZ5a1k3UTE4VEhRS2lWcDFaCmp3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K"; + let operator_id = 1141; + + let operator = Operator::new(pem_data, operator_id.into()); + assert!(operator.is_ok()); + + if let Ok(op) = operator { + assert_eq!(op.id.0, operator_id); + } + } +} diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs new file mode 100644 index 00000000..3ae2f29a --- /dev/null +++ 
b/anchor/common/ssv_types/src/share.rs @@ -0,0 +1,30 @@ +use derive_more::{Deref, From}; +use types::{Address, Graffiti, PublicKey}; + +/// Index of the validator in the validator registry. +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] +pub struct ValidatorIndex(pub usize); + +/// One of N shares of a split validator key. +#[derive(Debug, Clone)] +pub struct Share { + /// The public key of this Share + pub share_pubkey: PublicKey, + /// Metadata about the Validator this Share corresponds to + pub validator_metadata: ValidatorMetadata, +} + +/// General Metadata about a Validator +#[derive(Debug, Clone)] +pub struct ValidatorMetadata { + /// Index of the validator + pub validator_index: ValidatorIndex, + /// Public key of the validator + pub validator_pubkey: PublicKey, + /// Eth1 fee address + pub fee_recipient: Address, + /// Graffiti + pub graffiti: Graffiti, + /// The owner of the validator + pub owner: Address, +} diff --git a/anchor/common/ssv_types/src/util.rs b/anchor/common/ssv_types/src/util.rs new file mode 100644 index 00000000..4de43814 --- /dev/null +++ b/anchor/common/ssv_types/src/util.rs @@ -0,0 +1,29 @@ +use base64::prelude::*; +use rsa::pkcs8::DecodePublicKey; +use rsa::RsaPublicKey; + +// Parse from a RSA public key string into the associated RSA representation +pub fn parse_rsa(pem_data: &str) -> Result { + // First decode the base64 data + let pem_decoded = BASE64_STANDARD + .decode(pem_data) + .map_err(|e| format!("Unable to decode base64 pem data: {}", e))?; + + // Convert the decoded data to a string + let mut pem_string = String::from_utf8(pem_decoded) + .map_err(|e| format!("Unable to convert decoded pem data into a string: {}", e))?; + + // Fix the header - replace PKCS1 header with PKCS8 header + pem_string = pem_string + .replace( + "-----BEGIN RSA PUBLIC KEY-----", + "-----BEGIN PUBLIC KEY-----", + ) + .replace("-----END RSA PUBLIC KEY-----", "-----END PUBLIC KEY-----"); + + // Parse the PEM string into an RSA public key using PKCS8 format + let rsa_pubkey = RsaPublicKey::from_public_key_pem(&pem_string) + .map_err(|e| format!("Failed to parse RSA public key: {}", e))?; + + Ok(rsa_pubkey) +} From 017d287f23c4cff303bdfffdba5f0f200259d5fe Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 5 Dec 2024 15:18:51 +0000 Subject: [PATCH 02/50] From for operator conversion from db, operator db functionality & tests --- Cargo.lock | 76 +++++++++-- anchor/common/ssv_types/src/operator.rs | 41 ++++-- anchor/database/Cargo.toml | 18 +++ anchor/database/src/lib.rs | 128 ++++++++++++++++++ anchor/database/src/operator_operations.rs | 150 +++++++++++++++++++++ 5 files changed, 395 insertions(+), 18 deletions(-) create mode 100644 anchor/database/Cargo.toml create mode 100644 anchor/database/src/lib.rs create mode 100644 anchor/database/src/operator_operations.rs diff --git a/Cargo.lock b/Cargo.lock index 7b414109..f8598758 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -537,7 +537,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.37", + "rustix 0.38.41", "slab", "tracing", "windows-sys 0.59.0", @@ -1660,6 +1660,21 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "database" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "r2d2", + "r2d2_sqlite", + "rand", + "rsa", + "rusqlite", + "ssv_types", + "tempfile", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", +] + [[package]] name = "db-key" version = "0.0.5" @@ -4007,9 +4022,9 @@ dependencies = [ [[package]] name = "libc" -version = 
"0.2.159" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libflate" @@ -5635,6 +5650,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der 0.7.9", + "pkcs8 0.10.2", + "spki 0.7.3", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -5677,7 +5703,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.37", + "rustix 0.38.41", "tracing", "windows-sys 0.59.0", ] @@ -6359,6 +6385,26 @@ dependencies = [ "archery", ] +[[package]] +name = "rsa" +version = "0.9.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519" +dependencies = [ + "const-oid", + "digest 0.10.7", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8 0.10.2", + "rand_core", + "signature 2.2.0", + "spki 0.7.3", + "subtle", + "zeroize", +] + [[package]] name = "rtnetlink" version = "0.10.1" @@ -6500,9 +6546,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.6.0", "errno", @@ -7278,6 +7324,16 @@ dependencies = [ "der 0.7.9", ] +[[package]] +name = "ssv_types" +version = "0.1.0" +dependencies = [ + "base64 0.22.1", + "derive_more 1.0.0", + "rsa", + "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", +] + [[package]] name = "ssz_types" version = "0.8.0" @@ -7546,14 +7602,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.37", + "rustix 0.38.41", "windows-sys 0.59.0", ] @@ -7574,7 +7630,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ - "rustix 0.38.37", + "rustix 0.38.41", "windows-sys 0.59.0", ] diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs index 70b96296..9023069a 100644 --- a/anchor/common/ssv_types/src/operator.rs +++ b/anchor/common/ssv_types/src/operator.rs @@ -1,13 +1,34 @@ use crate::util::parse_rsa; use derive_more::{Deref, From}; +use rsa::pkcs8::DecodePublicKey; use rsa::RsaPublicKey; use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; +use std::str::FromStr; +use types::Address; + +/// From (id, pubkey, owner) for easy db converion +/// This should never fail as if it does it indicates some data corruption in the DB as the data is +/// saved from a previously constructed operator +impl From<(u64, String, String)> 
for Operator { + fn from(source: (u64, String, String)) -> Self { + let id: OperatorId = OperatorId(source.0); + let rsa_pubkey = RsaPublicKey::from_public_key_pem(&source.1) + .expect("Failed to parse String into RsaPublicKey"); + let owner: Address = + Address::from_str(&source.2).expect("Failed to parse String into Address"); + Operator { + id, + rsa_pubkey, + owner, + } + } +} /// Unique identifier for an Operator. #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] -pub struct OperatorId(u64); +pub struct OperatorId(pub u64); /// Client responsible for maintaining the overall health of the network. #[derive(Debug, Clone)] @@ -15,21 +36,24 @@ pub struct Operator { /// ID to uniquely identify this operator pub id: OperatorId, /// Base-64 encoded PEM RSA public key - pub public_key: RsaPublicKey, + pub rsa_pubkey: RsaPublicKey, + /// Owner of the operator + pub owner: Address, } impl Operator { /// Creates a new operator from its OperatorId and PEM-encoded public key string - pub fn new(pem_data: &str, operator_id: OperatorId) -> Result { + pub fn new(pem_data: &str, operator_id: OperatorId, owner: Address) -> Result { let rsa_pubkey = parse_rsa(pem_data)?; - Ok(Self::new_with_pubkey(rsa_pubkey, operator_id)) + Ok(Self::new_with_pubkey(rsa_pubkey, operator_id, owner)) } // Creates a new operator from an existing RSA public key and OperatorId - pub fn new_with_pubkey(rsa_pubkey: RsaPublicKey, operator_id: OperatorId) -> Self { + pub fn new_with_pubkey(rsa_pubkey: RsaPublicKey, id: OperatorId, owner: Address) -> Self { Self { - id: operator_id, - public_key: rsa_pubkey, + id, + rsa_pubkey, + owner, } } } @@ -43,8 +67,9 @@ mod operator_tests { // Random valid operator public key and id: https://explorer.ssv.network/operators/1141 let pem_data = "LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBbFFmQVIzMEd4bFpacEwrNDByU0IKTEpSYlkwY2laZDBVMXhtTlp1bFB0NzZKQXJ5d2lia0Y4SFlQV2xkM3dERVdWZXZjRzRGVVBSZ0hDM1MrTHNuMwpVVC9TS280eE9nNFlnZ0xqbVVXQysyU3ZGRFhXYVFvdFRXYW5UU0drSEllNGFnTVNEYlUzOWhSMWdOSTJhY2NNCkVCcjU2eXpWcFMvKytkSk5xU002S1FQM3RnTU5ia2IvbEtlY0piTXM0ZWNRMTNkWUQwY3dFNFQxcEdTYUdhcEkKbFNaZ2lYd0cwSGFNTm5GUkt0OFlkZjNHaTFMRlh3Zlo5NHZFRjJMLzg3RCtidjdkSFVpSGRjRnh0Vm0rVjVvawo3VFptcnpVdXB2NWhKZ3lDVE9zc0xHOW1QSGNORnhEVDJ4NUJKZ2FFOVpJYnMrWVZ5a1k3UTE4VEhRS2lWcDFaCmp3SURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K"; let operator_id = 1141; + let address = Address::random(); - let operator = Operator::new(pem_data, operator_id.into()); + let operator = Operator::new(pem_data, operator_id.into(), address); assert!(operator.is_ok()); if let Ok(op) = operator { diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml new file mode 100644 index 00000000..41fe1928 --- /dev/null +++ b/anchor/database/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "database" +version = "0.1.0" +edition = { workspace = true } +authors = ["Sigma Prime "] + +[dependencies] +r2d2 = "0.8.10" +r2d2_sqlite = "0.21.0" +rusqlite = "0.28.0" +ssv_types = { workspace = true } +types = { workspace = true } +base64 = {workspace = true} +rsa = {workspace = true} + +[dev-dependencies] +rand = "0.8.5" +tempfile = "3.14.0" diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs new file mode 100644 index 00000000..a59ec08a --- /dev/null +++ b/anchor/database/src/lib.rs @@ -0,0 +1,128 @@ +use r2d2_sqlite::SqliteConnectionManager; +use rsa::RsaPublicKey; +use rusqlite::params; +use ssv_types::{Operator, OperatorId, Share}; +use std::collections::HashMap; 
+use std::fs::File;
+use std::path::Path;
+use std::time::Duration;
+use types::{Address, PublicKey};
+
+mod operator_operations;
+
+type Pool = r2d2::Pool<SqliteConnectionManager>;
+
+pub const POOL_SIZE: u32 = 1;
+pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);
+
+#[derive(Debug, Clone)]
+pub struct NetworkDatabase {
+    /// OperatorID => Operator
+    operators: HashMap<OperatorId, Operator>,
+    /// ValidatorPublickKey => Share
+    shares: HashMap<PublicKey, Share>,
+    conn_pool: Pool,
+}
+
+impl NetworkDatabase {
+    /// Open an existing database at the given `path`, or create one if none exists.
+    pub fn open_or_create(path: &Path) -> Result<Self, String> {
+        if path.exists() {
+            Self::open(path)
+        } else {
+            Self::create(path)
+        }
+    }
+
+    fn connection(&self) -> Result<r2d2::PooledConnection<SqliteConnectionManager>, String> {
+        self.conn_pool
+            .get()
+            .map_err(|e| format!("Unable to get db connection: {:?}", e))
+    }
+
+    /// Create a `NetworkDatabase` at the given path.
+    pub fn create(path: &Path) -> Result<Self, String> {
+        let _file = File::options()
+            .write(true)
+            .read(true)
+            .create_new(true)
+            .open(path)
+            .map_err(|e| format!("Unable to create file at path {:?}: {}", path, e))?;
+
+        // restrict file permissions
+        let conn_pool = Self::open_conn_pool(path)?;
+        let conn = conn_pool
+            .get()
+            .map_err(|e| format!("Unable to get connection to the database: {:?}", e))?;
+
+        // Operator table
+        conn.execute(
+            "CREATE TABLE operators (
+                operator_id INTEGER PRIMARY KEY,
+                public_key TEXT NOT NULL,
+                owner_address TEXT NOT NULL,
+                UNIQUE (public_key)
+            )",
+            params![],
+        )
+        .map_err(|e| format!("Unable to create operators table in database: {:?}", e))?;
+
+        Ok(Self {
+            operators: HashMap::new(),
+            shares: HashMap::new(),
+            conn_pool,
+        })
+    }
+
+    /// Open an existing `NetworkDatabase` from disk.
+    pub fn open(path: &Path) -> Result<Self, String> {
+        let conn_pool = Self::open_conn_pool(path)?;
+
+        // Populate all in memory data w/ db connection
+        let operators = Self::populate_operators(&conn_pool);
+        let shares = Self::populate_shares(&conn_pool, &operators);
+
+        let db = Self {
+            operators,
+            shares,
+            conn_pool,
+        };
+        Ok(db)
+    }
+
+    // populate in memory share store
+    fn populate_shares(
+        _conn: &Pool,
+        _operators: &HashMap<OperatorId, Operator>,
+    ) -> HashMap<PublicKey, Share> {
+        todo!()
+    }
+
+    // populate in memory operator store w/ existing database entries
+    fn populate_operators(_conn: &Pool) -> HashMap<OperatorId, Operator> {
+        todo!()
+    }
+
+    fn open_conn_pool(path: &Path) -> Result<Pool, String> {
+        let manager = SqliteConnectionManager::file(path);
+        // some other args here
+        let conn_pool = Pool::builder()
+            .max_size(POOL_SIZE)
+            .connection_timeout(CONNECTION_TIMEOUT)
+            .build(manager)
+            .map_err(|e| format!("Unable to open database: {:?}", e))?;
+        Ok(conn_pool)
+    }
+}
+
+#[cfg(test)]
+mod database_test {
+    use super::*;
+
+    #[test]
+    fn test_create_database() {
+        let path = Path::new("db");
+        let db = NetworkDatabase::open_or_create(path);
+        assert!(db.is_ok());
+    }
+}
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
new file mode 100644
index 00000000..cc43095a
--- /dev/null
+++ b/anchor/database/src/operator_operations.rs
+use super::NetworkDatabase;
+use rsa::pkcs8::{EncodePublicKey, LineEnding};
+use rsa::RsaPublicKey;
+use rusqlite::params;
+use ssv_types::{Operator, OperatorId};
+
+/// Implements all operator related functionality on the database
+impl NetworkDatabase {
+    /// Insert a new operator into the database
+    pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), String> {
+        let conn = self.connection()?;
+
+        // encode data and insert into database
+        let encoded_pubkey =
Self::encode_pubkey(&operator.rsa_pubkey); + let converted_address = operator.owner.to_string(); + conn.execute( + "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", + params![*operator.id, encoded_pubkey, converted_address], // Note: I also fixed the parameter order to match the columns + ) + .map_err(|e| format!("Failed to insert operator: {:?}", e))?; // Better error handling + + // then, store in memory + self.operators.insert(operator.id, operator.clone()); + Ok(()) + } + + /// Delete an operator + pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), String> { + // make sure that it exists + if !self.operators.contains_key(&id) { + return Ok(()); + } + + // Remove from db and in memory + let conn = self.connection()?; + conn.execute("DELETE FROM operators WHERE operator_id = ?1", params![*id]) + .map_err(|e| format!("Failed to delete operator: {:?}", e))?; + self.operators.remove(&id); + Ok(()) + } + + /// Get operator data from in memory store + pub fn get_operator(&self, id: &OperatorId) -> Option { + self.operators.get(id).cloned() + } + + /// Check to see if the operator exists + pub fn operator_exists(&self, id: &OperatorId) -> bool { + self.operators.contains_key(id) + } + + // Helper to encode the RsaPublicKey to PEM string + fn encode_pubkey(pubkey: &RsaPublicKey) -> String { + // this should never fail as the key has already been validated upon construction + pubkey + .to_public_key_pem(LineEnding::default()) + .expect("Failed to encode RsaPublicKey") + } +} + +#[cfg(test)] +mod operator_database_tests { + use super::*; + use rsa::RsaPrivateKey; + use tempfile::tempdir; + use types::Address; + + // Generate random operator data + fn dummy_operator() -> Operator { + let op_id = OperatorId(10); + let address = Address::random(); + let _priv_key = RsaPrivateKey::new(&mut rand::thread_rng(), 2048).unwrap(); + let pubkey = RsaPublicKey::from(&_priv_key); + Operator::new_with_pubkey(pubkey, op_id, address) + } + + // fetch operator from database + fn get_operator_from_db(db: NetworkDatabase, id: OperatorId) -> Option { + let conn = db.connection().unwrap(); + let mut query = conn + .prepare("SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1") + .unwrap(); + let res: Option<(u64, String, String)> = query + .query_row(params![*id], |row| { + Ok(( + row.get(0).unwrap(), + row.get(1).unwrap(), + row.get(2).unwrap(), + )) + }) + .ok(); + res.map(|operator| operator.into()) + } + + #[test] + // Test inserting into the database and then confirming that it is both in + // memory and in the underlying database + fn test_insert_retrieve_operator() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::create(&file).unwrap(); + + // Insert dummy operator data into the database + let operator = dummy_operator(); + assert!(db.insert_operator(&operator).is_ok()); + + // Fetch operator from in memory store and confirm values + let fetched_operator = db.get_operator(&operator.id); + if let Some(op) = fetched_operator { + assert_eq!(op.id, operator.id); + assert_eq!(op.rsa_pubkey, operator.rsa_pubkey); + assert_eq!(op.owner, operator.owner); + } else { + panic!("Expected to find operator in memory"); + } + + // Check to make sure the operator is also in the underlying db + let db_operator = get_operator_from_db(db, operator.id); + if let Some(op) = db_operator { + assert_eq!(op.rsa_pubkey, operator.rsa_pubkey); + assert_eq!(op.id, 
operator.id); + assert_eq!(op.owner, operator.owner); + } else { + panic!("Expected to find operator in database"); + } + } + + #[test] + // Test deleting an operator and confirming it is gone from the db and in memory + fn test_insert_delete_operator() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::create(&file).unwrap(); + + // Insert dummy operator data into the database + let operator = dummy_operator(); + let _ = db.insert_operator(&operator); + + // Now, delete the operator + assert!(db.delete_operator(operator.id).is_ok()); + + // Confirm that is it removed from in memory + assert!(db.get_operator(&operator.id).is_none()); + + // Also confirm that it is removed from the database + assert!(get_operator_from_db(db, operator.id).is_none()); + } +} From 07e49b4523b19ef6a505dce1cd91bd990a367d06 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 5 Dec 2024 17:21:43 +0000 Subject: [PATCH 03/50] all tables added, operator tests fixed, share operations added, cluster operations added, in memory construction --- anchor/common/ssv_types/src/cluster.rs | 1 + anchor/database/src/cluster_operations.rs | 67 ++++++++++++++++++++ anchor/database/src/lib.rs | 68 +++++++++++++++++++-- anchor/database/src/operator_operations.rs | 4 +- anchor/database/src/share_operations.rs | 46 ++++++++++++++ anchor/database/src/validator_operations.rs | 16 +++++ 6 files changed, 195 insertions(+), 7 deletions(-) create mode 100644 anchor/database/src/cluster_operations.rs create mode 100644 anchor/database/src/share_operations.rs create mode 100644 anchor/database/src/validator_operations.rs diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 77d8f3ec..f8950d95 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -7,6 +7,7 @@ use derive_more::{Deref, From}; pub struct ClusterId(pub u64); /// A Cluster is a group of Operators that are acting on behalf of a Validator +#[derive(Debug, Clone)] pub struct Cluster { /// Unique identifier for a Cluster pub cluster_id: ClusterId, diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs new file mode 100644 index 00000000..e28e4af8 --- /dev/null +++ b/anchor/database/src/cluster_operations.rs @@ -0,0 +1,67 @@ +use crate::NetworkDatabase; +use rusqlite::{params, Transaction}; +use ssv_types::{Cluster, ClusterId, ClusterMember}; + +/// Implements all cluster related functionality on the database +impl NetworkDatabase { + /// Inserts a new cluster into the database + pub fn insert_cluster(&mut self, cluster: Cluster) -> Result<(), String> { + let mut conn = self.connection()?; + let tx = conn + .transaction() + .map_err(|e| format!("Unable to start a trnsaction: {:?}", e))?; + + // Insert the top level cluster data + tx.execute( + "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", + params![*cluster.cluster_id, 0], + ) + .map_err(|e| format!("Failed to insert cluster {:?}", e))?; + + // Now, insert all the cluster members + self.insert_cluster_members(&tx, &cluster.cluster_members)?; + + // Commit all operators to the db + tx.commit() + .map_err(|e| format!("Failed to commit transaction: {:?}", e))?; + + // Since we have committed, we can now store everything in memory and know it will be + // consistent + self.clusters.insert(cluster.cluster_id, cluster.clone()); + for member in cluster.cluster_members { + let key = 
member.share.share_pubkey.clone(); + self.shares.insert(key, member.share); + } + Ok(()) + } + + // Helper to insert all of the cluster members + fn insert_cluster_members( + &mut self, + tx: &Transaction<'_>, + cluster_members: &Vec, + ) -> Result<(), String> { + for member in cluster_members { + // insert the member + tx.execute( + "INSERT INTO clusters_members (cluster_id, operator_id) VALUES (?1, ?2)", + params![*member.cluster_id, *member.operator_id], + ) + .map_err(|e| format!("Failed to insert cluster member {:?}", e))?; + + // insert the members share + self.insert_share(tx, &member.share, &member.cluster_id, &member.operator_id)?; + } + Ok(()) + } + + // Fetch a cluster that we are in + pub fn get_cluster(&self, id: &ClusterId) -> Option { + self.clusters.get(id).cloned() + } + + /// Checks to see if we are a member of the cluster + pub fn member_of_cluster(&self, id: &ClusterId) -> bool { + self.clusters.contains_key(id) + } +} diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index a59ec08a..5d3478f7 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,14 +1,17 @@ use r2d2_sqlite::SqliteConnectionManager; -use rsa::RsaPublicKey; use rusqlite::params; +use ssv_types::{Cluster, ClusterId}; use ssv_types::{Operator, OperatorId, Share}; use std::collections::HashMap; use std::fs::File; use std::path::Path; use std::time::Duration; -use types::{Address, PublicKey}; +use types::PublicKey; +mod cluster_operations; mod operator_operations; +mod share_operations; +mod validator_operations; type Pool = r2d2::Pool; @@ -17,9 +20,8 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[derive(Debug, Clone)] pub struct NetworkDatabase { - /// OperatorID => Operator operators: HashMap, - /// ValidatorPublickKey => Share + clusters: HashMap, shares: HashMap, conn_pool: Pool, } @@ -67,8 +69,63 @@ impl NetworkDatabase { ) .map_err(|e| format!("Unable to create operators table in database: {:?}", e))?; + // Create clusters table - another parent table with no dependencies + conn.execute( + "CREATE TABLE clusters ( + cluster_id INTEGER PRIMARY KEY, + faulty INTEGER NOT NULL, + liquidated BOOLEAN DEFAULT FALSE + )", + params![], + ) + .map_err(|e| format!("Unable to create clusters table: {:?}", e))?; + + // Create cluster_members table - depends on both operators and clusters + conn.execute( + "CREATE TABLE cluster_members ( + cluster_id INTEGER NOT NULL, + operator_id INTEGER NOT NULL, + PRIMARY KEY (cluster_id, operator_id), + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE, + FOREIGN KEY (operator_id) REFERENCES operators(operator_id) ON DELETE CASCADE + )", + params![], + ) + .map_err(|e| format!("Unable to create cluster_members table: {:?}", e))?; + + // Create validators table - depends on clusters + conn.execute( + "CREATE TABLE validators ( + validator_pubkey TEXT PRIMARY KEY, + cluster_id INTEGER NOT NULL, + fee_recipient TEXT, + graffiti BLOB, + validator_index INTEGER, + last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE + )", + params![], + ) + .map_err(|e| format!("Unable to create validators table: {:?}", e))?; + + // Create shares table - depends on validators and cluster_members + conn.execute( + "CREATE TABLE shares ( + validator_pubkey TEXT NOT NULL, + cluster_id INTEGER NOT NULL, + operator_id INTEGER NOT NULL, + share_pubkey TEXT, + PRIMARY KEY (validator_pubkey, operator_id), + FOREIGN KEY (validator_pubkey) 
REFERENCES validators(validator_pubkey) ON DELETE CASCADE, + FOREIGN KEY (cluster_id, operator_id) REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, + FOREIGN KEY (validator_pubkey, cluster_id) REFERENCES validators(validator_pubkey, cluster_id) + )", + params![], + ).map_err(|e| format!("Unable to create shares table: {:?}", e))?; + Ok(Self { operators: HashMap::new(), + clusters: HashMap::new(), shares: HashMap::new(), conn_pool, }) @@ -84,7 +141,8 @@ impl NetworkDatabase { let db = Self { operators, - shares, + clusters: HashMap::new(), + shares: HashMap::new(), conn_pool, }; Ok(db) diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index cc43095a..4f1f8b95 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -15,9 +15,9 @@ impl NetworkDatabase { let converted_address = operator.owner.to_string(); conn.execute( "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", - params![*operator.id, encoded_pubkey, converted_address], // Note: I also fixed the parameter order to match the columns + params![*operator.id, encoded_pubkey, converted_address], ) - .map_err(|e| format!("Failed to insert operator: {:?}", e))?; // Better error handling + .map_err(|e| format!("Failed to insert operator: {:?}", e))?; // then, store in memory self.operators.insert(operator.id, operator.clone()); diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs new file mode 100644 index 00000000..604c6a4a --- /dev/null +++ b/anchor/database/src/share_operations.rs @@ -0,0 +1,46 @@ +use super::NetworkDatabase; +use rusqlite::{params, Transaction}; +use ssv_types::{ClusterId, OperatorId, Share}; +use types::PublicKey; + +/// Implements all Share related functionality on the database +impl NetworkDatabase { + pub(crate) fn insert_share( + &mut self, + tx: &Transaction<'_>, + share: &Share, + cluster_id: &ClusterId, + operator_id: &OperatorId, + ) -> Result<(), String> { + let cluster_id_i64: i64 = cluster_id.0 as i64; + let operator_id_i64: i64 = operator_id.0 as i64; + tx.execute( + "INSERT INTO shares ( + validator_pubkey, + cluster_id, + operator_id, + share_pubkey, + ) values (?1, ?2, ?3, ?4)", + params![ + share.validator_metadata.validator_pubkey.to_string(), + cluster_id_i64, + operator_id_i64, + share.share_pubkey.to_string(), + ], + ) + .map_err(|e| format!("Failed to insert share: {:?}", e))?; + + // TODO!(): Validator metadata insertion? 
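+        // One possible shape for that insertion, reusing the `validators` table
+        // created in this patch's lib.rs (hedged sketch only, not applied here):
+        //
+        // tx.execute(
+        //     "INSERT INTO validators (validator_pubkey, cluster_id, fee_recipient, validator_index)
+        //      VALUES (?1, ?2, ?3, ?4)",
+        //     params![
+        //         share.validator_metadata.validator_pubkey.to_string(),
+        //         cluster_id_i64,
+        //         share.validator_metadata.fee_recipient.to_string(),
+        //         *share.validator_metadata.validator_index as i64,
+        //     ],
+        // )
+        // .map_err(|e| format!("Failed to insert validator metadata: {:?}", e))?;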
+ Ok(()) + } + + /// Get the share owned by the operator + pub fn get_share(&self, share_pubkey: &PublicKey) -> Option { + self.shares.get(share_pubkey).cloned() + } + + /// Check to see if our operator owns this share + pub fn operator_owns(&self, share_pubkey: &PublicKey) -> bool { + self.shares.contains_key(share_pubkey) + } +} diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs new file mode 100644 index 00000000..6079a24b --- /dev/null +++ b/anchor/database/src/validator_operations.rs @@ -0,0 +1,16 @@ +use crate::NetworkDatabase; + +/// Implements all validator related db functionality +impl NetworkDatabase { + pub fn insert_validator(&mut self) -> Result<(), String> { + todo!() + } + + pub fn delete_validator(&mut self) -> Result<(), String> { + todo!() + } + + pub fn get_validator_metadata(&self) { + todo!() + } +} From c14a2f436b6d292cb195a2054a6f4ae952cb61a3 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 5 Dec 2024 22:05:19 +0000 Subject: [PATCH 04/50] move metadata to cluster, insertion test passing, fix cascade deletion, move metadata insertion --- anchor/common/ssv_types/src/cluster.rs | 22 ++++ anchor/common/ssv_types/src/lib.rs | 4 +- anchor/common/ssv_types/src/share.rs | 26 +---- anchor/database/src/cluster_operations.rs | 107 ++++++++++++++++-- anchor/database/src/lib.rs | 13 ++- anchor/database/src/operator_operations.rs | 58 ++++------ anchor/database/src/share_operations.rs | 8 +- anchor/database/src/test_utils/mod.rs | 125 +++++++++++++++++++++ 8 files changed, 291 insertions(+), 72 deletions(-) create mode 100644 anchor/database/src/test_utils/mod.rs diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index f8950d95..308aee67 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -1,6 +1,7 @@ use crate::OperatorId; use crate::Share; use derive_more::{Deref, From}; +use types::{Address, Graffiti, PublicKey}; /// Unique identifier for a cluster #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] @@ -17,6 +18,8 @@ pub struct Cluster { pub faulty: u64, /// If the Cluster is liquidated or active pub liquidated: bool, + /// Metadata about the validator this committee represents + pub validator_metadata: ValidatorMetadata, } /// A member of a Cluster. This is just an Operator that holds onto a share of the Validator key @@ -29,3 +32,22 @@ pub struct ClusterMember { /// The Share this member is responsible for pub share: Share, } + +/// Index of the validator in the validator registry. 
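+/// (Newtype over `usize`: the derived `From` and `Deref` below let it convert and
+/// dereference like a plain index while staying type-distinct from other ids.)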
+#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] +pub struct ValidatorIndex(pub usize); + +/// General Metadata about a Validator +#[derive(Debug, Clone)] +pub struct ValidatorMetadata { + /// Index of the validator + pub validator_index: ValidatorIndex, + /// Public key of the validator + pub validator_pubkey: PublicKey, + /// Eth1 fee address + pub fee_recipient: Address, + /// Graffiti + pub graffiti: Graffiti, + /// The owner of the validator + pub owner: Address, +} diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 271ecb1a..7f26161b 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -1,6 +1,6 @@ -pub use cluster::{Cluster, ClusterId, ClusterMember}; +pub use cluster::{Cluster, ClusterId, ClusterMember, ValidatorMetadata, ValidatorIndex}; pub use operator::{Operator, OperatorId}; -pub use share::{Share, ValidatorMetadata, ValidatorIndex}; +pub use share::Share; mod cluster; mod operator; mod share; diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index 3ae2f29a..b345edb1 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -1,30 +1,10 @@ -use derive_more::{Deref, From}; -use types::{Address, Graffiti, PublicKey}; - -/// Index of the validator in the validator registry. -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] -pub struct ValidatorIndex(pub usize); +use types::PublicKey; /// One of N shares of a split validator key. #[derive(Debug, Clone)] pub struct Share { /// The public key of this Share pub share_pubkey: PublicKey, - /// Metadata about the Validator this Share corresponds to - pub validator_metadata: ValidatorMetadata, -} - -/// General Metadata about a Validator -#[derive(Debug, Clone)] -pub struct ValidatorMetadata { - /// Index of the validator - pub validator_index: ValidatorIndex, - /// Public key of the validator - pub validator_pubkey: PublicKey, - /// Eth1 fee address - pub fee_recipient: Address, - /// Graffiti - pub graffiti: Graffiti, - /// The owner of the validator - pub owner: Address, + // Encrypted part + // todo!() } diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index e28e4af8..cf58b9ce 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,6 +1,7 @@ use crate::NetworkDatabase; use rusqlite::{params, Transaction}; use ssv_types::{Cluster, ClusterId, ClusterMember}; +use types::PublicKey; /// Implements all cluster related functionality on the database impl NetworkDatabase { @@ -9,7 +10,7 @@ impl NetworkDatabase { let mut conn = self.connection()?; let tx = conn .transaction() - .map_err(|e| format!("Unable to start a trnsaction: {:?}", e))?; + .map_err(|e| format!("Unable to start a transaction: {:?}", e))?; // Insert the top level cluster data tx.execute( @@ -18,8 +19,22 @@ impl NetworkDatabase { ) .map_err(|e| format!("Failed to insert cluster {:?}", e))?; + // Insert the validator metadata for the cluster + tx.execute( + "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", + params![ + cluster.validator_metadata.validator_pubkey.to_string(), + *cluster.cluster_id + ], + ) + .map_err(|e| format!("Failed to insert cluster {:?}", e))?; + // Now, insert all the cluster members - self.insert_cluster_members(&tx, &cluster.cluster_members)?; + self.insert_cluster_members( + &tx, + &cluster.cluster_members, + 
&cluster.validator_metadata.validator_pubkey, + )?; // Commit all operators to the db tx.commit() @@ -40,18 +55,26 @@ impl NetworkDatabase { &mut self, tx: &Transaction<'_>, cluster_members: &Vec, + validator_pubkey: &PublicKey, ) -> Result<(), String> { for member in cluster_members { // insert the member tx.execute( - "INSERT INTO clusters_members (cluster_id, operator_id) VALUES (?1, ?2)", + "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", params![*member.cluster_id, *member.operator_id], ) .map_err(|e| format!("Failed to insert cluster member {:?}", e))?; // insert the members share - self.insert_share(tx, &member.share, &member.cluster_id, &member.operator_id)?; + self.insert_share( + tx, + &member.share, + &member.cluster_id, + &member.operator_id, + validator_pubkey, + )?; } + Ok(()) } @@ -59,9 +82,79 @@ impl NetworkDatabase { pub fn get_cluster(&self, id: &ClusterId) -> Option { self.clusters.get(id).cloned() } +} + +#[cfg(test)] +mod cluster_database_tests { + use super::*; + use crate::test_utils::{ + dummy_cluster, dummy_operator, get_cluster_from_db, get_cluster_member_from_db, + }; + use tempfile::tempdir; + + #[test] + // Test inserting a cluster into the database + fn test_insert_retrieve_cluster() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::create(&file).unwrap(); + + // First insert the operators that will be part of the cluster + for i in 0..4 { + let operator = dummy_operator(i); + assert!(db.insert_operator(&operator).is_ok()); + } + + // Insert a dummy cluster + let cluster = dummy_cluster(4); + assert!(db.insert_cluster(cluster.clone()).is_ok()); + + // Verify cluster can be retrieved from memory + let retrieved = db.get_cluster(&cluster.cluster_id); + assert!(retrieved.is_some()); + + // Check to make sure the data is expected + let retrieved = retrieved.unwrap(); + assert_eq!(retrieved.cluster_id, cluster.cluster_id); + assert_eq!( + retrieved.cluster_members.len(), + cluster.cluster_members.len() + ); + assert_eq!(retrieved.faulty, cluster.faulty); + assert_eq!(retrieved.liquidated, cluster.liquidated); + + // Verify cluster is in the underlying database + let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); + assert!(cluster_row.is_some()); + let (db_cluster_id, db_faulty, db_liquidated) = cluster_row.unwrap(); + assert_eq!(db_cluster_id, *cluster.cluster_id as i64); + assert_eq!(db_faulty, cluster.faulty as i64); + assert_eq!(db_liquidated, cluster.liquidated); + + // Verify cluster members are in the underlying database + for member in &cluster.cluster_members { + let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); + assert!(member_row.is_some()); + let (db_cluster_id, db_operator_id) = member_row.unwrap(); + assert_eq!(db_cluster_id, *member.cluster_id as i64); + assert_eq!(db_operator_id, *member.operator_id as i64); + } + + // Verify that the shares are in the database + } + + #[test] + fn test_insert_cluster_without_operators() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::create(&file).unwrap(); + + // Try to insert a cluster without first inserting its operators + let cluster = dummy_cluster(4); - /// Checks to see if we are a member of the cluster - pub fn member_of_cluster(&self, id: &ClusterId) -> bool { - self.clusters.contains_key(id) + // This should fail because the operators don't exist 
in the database + assert!(db.insert_cluster(cluster).is_err()); } } diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 5d3478f7..9de6eb99 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -13,6 +13,16 @@ mod operator_operations; mod share_operations; mod validator_operations; +#[cfg(test)] +pub mod test_utils; + +// Todo +// 1) Decide on the types I want to use +// 2) Rebuilding after restart +// 3) Validator logic +// 4) To/From sql for all the types +// 5) Test + type Pool = r2d2::Pool; pub const POOL_SIZE: u32 = 1; @@ -116,9 +126,8 @@ impl NetworkDatabase { operator_id INTEGER NOT NULL, share_pubkey TEXT, PRIMARY KEY (validator_pubkey, operator_id), - FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE, FOREIGN KEY (cluster_id, operator_id) REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, - FOREIGN KEY (validator_pubkey, cluster_id) REFERENCES validators(validator_pubkey, cluster_id) + FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE )", params![], ).map_err(|e| format!("Unable to create shares table: {:?}", e))?; diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 4f1f8b95..1587c3f7 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -8,6 +8,11 @@ use ssv_types::{Operator, OperatorId}; impl NetworkDatabase { /// Insert a new operator into the database pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), String> { + // make sure that this operator does not already exist + if self.operators.contains_key(&operator.id) { + return Ok(()); + } + let conn = self.connection()?; // encode data and insert into database @@ -59,38 +64,10 @@ impl NetworkDatabase { } #[cfg(test)] -mod operator_database_tests { +pub(crate) mod operator_database_tests { use super::*; - use rsa::RsaPrivateKey; + use crate::test_utils::{dummy_operator, get_operator_from_db}; use tempfile::tempdir; - use types::Address; - - // Generate random operator data - fn dummy_operator() -> Operator { - let op_id = OperatorId(10); - let address = Address::random(); - let _priv_key = RsaPrivateKey::new(&mut rand::thread_rng(), 2048).unwrap(); - let pubkey = RsaPublicKey::from(&_priv_key); - Operator::new_with_pubkey(pubkey, op_id, address) - } - - // fetch operator from database - fn get_operator_from_db(db: NetworkDatabase, id: OperatorId) -> Option { - let conn = db.connection().unwrap(); - let mut query = conn - .prepare("SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1") - .unwrap(); - let res: Option<(u64, String, String)> = query - .query_row(params![*id], |row| { - Ok(( - row.get(0).unwrap(), - row.get(1).unwrap(), - row.get(2).unwrap(), - )) - }) - .ok(); - res.map(|operator| operator.into()) - } #[test] // Test inserting into the database and then confirming that it is both in @@ -102,7 +79,7 @@ mod operator_database_tests { let mut db = NetworkDatabase::create(&file).unwrap(); // Insert dummy operator data into the database - let operator = dummy_operator(); + let operator = dummy_operator(1); assert!(db.insert_operator(&operator).is_ok()); // Fetch operator from in memory store and confirm values @@ -116,7 +93,7 @@ mod operator_database_tests { } // Check to make sure the operator is also in the underlying db - let db_operator = get_operator_from_db(db, operator.id); + let db_operator = get_operator_from_db(&db, 
operator.id); if let Some(op) = db_operator { assert_eq!(op.rsa_pubkey, operator.rsa_pubkey); assert_eq!(op.id, operator.id); @@ -135,7 +112,7 @@ mod operator_database_tests { let mut db = NetworkDatabase::create(&file).unwrap(); // Insert dummy operator data into the database - let operator = dummy_operator(); + let operator = dummy_operator(1); let _ = db.insert_operator(&operator); // Now, delete the operator @@ -145,6 +122,19 @@ mod operator_database_tests { assert!(db.get_operator(&operator.id).is_none()); // Also confirm that it is removed from the database - assert!(get_operator_from_db(db, operator.id).is_none()); + assert!(get_operator_from_db(&db, operator.id).is_none()); + } + + #[test] + // insert multiple operators + fn test_insert_multiple_operators() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::create(&file).unwrap(); + + for id in 0..4 { + let operator = dummy_operator(id); + assert!(db.insert_operator(&operator).is_ok()); + } } } diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs index 604c6a4a..7aa41346 100644 --- a/anchor/database/src/share_operations.rs +++ b/anchor/database/src/share_operations.rs @@ -11,6 +11,7 @@ impl NetworkDatabase { share: &Share, cluster_id: &ClusterId, operator_id: &OperatorId, + validator_pubkey: &PublicKey, ) -> Result<(), String> { let cluster_id_i64: i64 = cluster_id.0 as i64; let operator_id_i64: i64 = operator_id.0 as i64; @@ -19,10 +20,10 @@ impl NetworkDatabase { validator_pubkey, cluster_id, operator_id, - share_pubkey, - ) values (?1, ?2, ?3, ?4)", + share_pubkey + ) VALUES (?1, ?2, ?3, ?4)", params![ - share.validator_metadata.validator_pubkey.to_string(), + validator_pubkey.to_string(), cluster_id_i64, operator_id_i64, share.share_pubkey.to_string(), @@ -30,7 +31,6 @@ impl NetworkDatabase { ) .map_err(|e| format!("Failed to insert share: {:?}", e))?; - // TODO!(): Validator metadata insertion? 
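+        // Validator metadata is now written by `insert_cluster`, which inserts
+        // the validators row (inside the same transaction) before the shares
+        // that reference it.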
Ok(()) } diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/test_utils/mod.rs new file mode 100644 index 00000000..f7de9be7 --- /dev/null +++ b/anchor/database/src/test_utils/mod.rs @@ -0,0 +1,125 @@ +use crate::NetworkDatabase; +use rand::Rng; +use rsa::RsaPrivateKey; +use rsa::RsaPublicKey; +use rusqlite::{params, OptionalExtension}; +use ssv_types::{ + Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex, + ValidatorMetadata, +}; +use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; +use types::{Address, Graffiti, PublicKey}; + +// Generate a random PublicKey +pub fn random_pubkey() -> PublicKey { + let rng = &mut XorShiftRng::from_seed([42; 16]); + PublicKey::random_for_test(rng) +} + +// Generate random operator data +pub fn dummy_operator(id: u64) -> Operator { + let op_id = OperatorId(id); + let address = Address::random(); + let _priv_key = RsaPrivateKey::new(&mut rand::thread_rng(), 2048).unwrap(); + let pubkey = RsaPublicKey::from(&_priv_key); + Operator::new_with_pubkey(pubkey, op_id, address) +} + +// Generate a random Cluster +pub fn dummy_cluster(num_operators: u64) -> Cluster { + let cluster_id = ClusterId(rand::thread_rng().gen::().into()); + let mut members = Vec::new(); + + // Create members for the cluster + for i in 0..num_operators { + let member = dummy_cluster_member(cluster_id, OperatorId(i)); + members.push(member); + } + + Cluster { + cluster_id, + cluster_members: members, + faulty: 0, + liquidated: false, + validator_metadata: dummy_validator_metadata(), + } +} + +// Generate a random ClusterMember +pub fn dummy_cluster_member(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember { + ClusterMember { + operator_id, + cluster_id, + share: dummy_share(), + } +} + +// Generate a random Share +pub fn dummy_share() -> Share { + Share { + share_pubkey: random_pubkey(), + } +} + +// Generate random validator metadata +pub fn dummy_validator_metadata() -> ValidatorMetadata { + ValidatorMetadata { + validator_index: ValidatorIndex(rand::thread_rng().gen::()), + validator_pubkey: random_pubkey(), + fee_recipient: Address::random(), + graffiti: Graffiti::default(), + owner: Address::random(), + } +} + +// Get an Operator from the database +pub fn get_operator_from_db(db: &NetworkDatabase, id: OperatorId) -> Option { + let conn = db.connection().unwrap(); + let mut query = conn + .prepare( + "SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1", + ) + .unwrap(); + let res: Option<(u64, String, String)> = query + .query_row(params![*id], |row| { + Ok(( + row.get(0).unwrap(), + row.get(1).unwrap(), + row.get(2).unwrap(), + )) + }) + .ok(); + res.map(|operator| operator.into()) +} + +// Get a cluster from the database +pub fn get_cluster_from_db(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { + let conn = db.connection().unwrap(); + let mut stmt = conn + .prepare("SELECT cluster_id, faulty, liquidated FROM clusters WHERE cluster_id = ?1") + .unwrap(); + let cluster_row: Option<(i64, i64, bool)> = stmt + .query_row(params![*id], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + }) + .optional() + .unwrap(); + cluster_row +} + +// Get a ClusterMember from the database +pub fn get_cluster_member_from_db( + db: &NetworkDatabase, + cluster_id: ClusterId, + operator_id: OperatorId, +) -> Option<(i64, i64)> { + let conn = db.connection().unwrap(); + let mut stmt = conn.prepare("SELECT cluster_id, operator_id FROM cluster_members WHERE cluster_id = ?1 AND 
operator_id = ?2").unwrap(); + let member_row: Option<(i64, i64)> = stmt + .query_row(params![*cluster_id, *operator_id], |row| { + Ok((row.get(0)?, row.get(1)?)) + }) + .optional() + .unwrap(); + member_row +} From 594bf364620229ac3ba72eac77a99797dcb21504 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 6 Dec 2024 13:47:17 +0000 Subject: [PATCH 05/50] proper error, migrate table to sql file --- Cargo.lock | 1 + Cargo.toml | 1 + anchor/common/ssv_types/Cargo.toml | 1 + anchor/database/Cargo.toml | 2 +- anchor/database/src/cluster_operations.rs | 26 ++-- anchor/database/src/error.rs | 35 +++++ anchor/database/src/lib.rs | 152 +++++++------------- anchor/database/src/operator_operations.rs | 14 +- anchor/database/src/share_operations.rs | 17 +-- anchor/database/src/table_schema.sql | 41 ++++++ anchor/database/src/validator_operations.rs | 6 +- 11 files changed, 155 insertions(+), 141 deletions(-) create mode 100644 anchor/database/src/error.rs create mode 100644 anchor/database/src/table_schema.sql diff --git a/Cargo.lock b/Cargo.lock index f8598758..485705b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7331,6 +7331,7 @@ dependencies = [ "base64 0.22.1", "derive_more 1.0.0", "rsa", + "rusqlite", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", ] diff --git a/Cargo.toml b/Cargo.toml index 19c2f647..0d172fc7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,7 @@ tracing = "0.1.40" tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } rsa = { version = "0.9.7", features = ["pem"] } base64 = "0.22.1" +rusqlite = "0.28.0" [profile.maxperf] inherits = "release" diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml index c1c35dc5..85661b8e 100644 --- a/anchor/common/ssv_types/Cargo.toml +++ b/anchor/common/ssv_types/Cargo.toml @@ -9,3 +9,4 @@ types = { workspace = true} rsa = { workspace = true } derive_more = { workspace = true } base64 = { workspace = true } +rusqlite = { workspace = true } diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml index 41fe1928..04476ed1 100644 --- a/anchor/database/Cargo.toml +++ b/anchor/database/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Sigma Prime "] [dependencies] r2d2 = "0.8.10" r2d2_sqlite = "0.21.0" -rusqlite = "0.28.0" +rusqlite = { workspace = true} ssv_types = { workspace = true } types = { workspace = true } base64 = {workspace = true} diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index cf58b9ce..63685b6a 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,4 +1,4 @@ -use crate::NetworkDatabase; +use crate::{DatabaseError, NetworkDatabase}; use rusqlite::{params, Transaction}; use ssv_types::{Cluster, ClusterId, ClusterMember}; use types::PublicKey; @@ -6,18 +6,15 @@ use types::PublicKey; /// Implements all cluster related functionality on the database impl NetworkDatabase { /// Inserts a new cluster into the database - pub fn insert_cluster(&mut self, cluster: Cluster) -> Result<(), String> { + pub fn insert_cluster(&mut self, cluster: Cluster) -> Result<(), DatabaseError> { let mut conn = self.connection()?; - let tx = conn - .transaction() - .map_err(|e| format!("Unable to start a transaction: {:?}", e))?; + let tx = conn.transaction()?; // Insert the top level cluster data tx.execute( "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", params![*cluster.cluster_id, 0], - ) - .map_err(|e| format!("Failed to insert cluster {:?}", e))?; + )?; 
// Insert the validator metadata for the cluster tx.execute( @@ -26,8 +23,7 @@ impl NetworkDatabase { cluster.validator_metadata.validator_pubkey.to_string(), *cluster.cluster_id ], - ) - .map_err(|e| format!("Failed to insert cluster {:?}", e))?; + )?; // Now, insert all the cluster members self.insert_cluster_members( @@ -37,8 +33,7 @@ impl NetworkDatabase { )?; // Commit all operators to the db - tx.commit() - .map_err(|e| format!("Failed to commit transaction: {:?}", e))?; + tx.commit()?; // Since we have committed, we can now store everything in memory and know it will be // consistent @@ -56,21 +51,20 @@ impl NetworkDatabase { tx: &Transaction<'_>, cluster_members: &Vec, validator_pubkey: &PublicKey, - ) -> Result<(), String> { + ) -> Result<(), DatabaseError> { for member in cluster_members { // insert the member tx.execute( "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", params![*member.cluster_id, *member.operator_id], - ) - .map_err(|e| format!("Failed to insert cluster member {:?}", e))?; + )?; // insert the members share self.insert_share( tx, &member.share, - &member.cluster_id, - &member.operator_id, + member.cluster_id, + member.operator_id, validator_pubkey, )?; } diff --git a/anchor/database/src/error.rs b/anchor/database/src/error.rs new file mode 100644 index 00000000..66be454e --- /dev/null +++ b/anchor/database/src/error.rs @@ -0,0 +1,35 @@ +use rusqlite::Error as SQLError; +use std::fmt::Display; +use std::io::{Error as IOError, ErrorKind}; + +#[derive(Debug)] +pub enum DatabaseError { + IOError(ErrorKind), + SQLError(String), + SQLPoolError(String), +} + +impl From for DatabaseError { + fn from(error: IOError) -> DatabaseError { + DatabaseError::IOError(error.kind()) + } +} + +impl From for DatabaseError { + fn from(error: SQLError) -> DatabaseError { + DatabaseError::SQLError(error.to_string()) + } +} + +impl From for DatabaseError { + fn from(error: r2d2::Error) -> Self { + // Use `Display` impl to print "timed out waiting for connection" + DatabaseError::SQLPoolError(format!("{}", error)) + } +} + +impl Display for DatabaseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self) + } +} diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 9de6eb99..f9245059 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,5 +1,4 @@ use r2d2_sqlite::SqliteConnectionManager; -use rusqlite::params; use ssv_types::{Cluster, ClusterId}; use ssv_types::{Operator, OperatorId, Share}; use std::collections::HashMap; @@ -9,10 +8,13 @@ use std::time::Duration; use types::PublicKey; mod cluster_operations; +pub mod error; mod operator_operations; mod share_operations; mod validator_operations; +pub use crate::error::DatabaseError; + #[cfg(test)] pub mod test_utils; @@ -28,17 +30,23 @@ type Pool = r2d2::Pool; pub const POOL_SIZE: u32 = 1; pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); +/// Top level NetworkDatabase that contains in memory storage to relevant information for quick +/// access and a connection to the underlying database #[derive(Debug, Clone)] pub struct NetworkDatabase { + /// All of the operators in the network operators: HashMap, + /// The clusters that this operator is a member in clusters: HashMap, + /// The shares that this operator is responsible for shares: HashMap, + /// Connection to the database conn_pool: Pool, } impl NetworkDatabase { /// Open an existing database at the given `path`, or create one if none exists. 
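     /// (Dispatches on `path.exists()`: an existing file is opened as-is,
     /// otherwise the file and its table schema are created.)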
- pub fn open_or_create(path: &Path) -> Result { + pub fn open_or_create(path: &Path) -> Result { if path.exists() { Self::open(path) } else { @@ -46,91 +54,36 @@ impl NetworkDatabase { } } - fn connection(&self) -> Result, String> { - self.conn_pool - .get() - .map_err(|e| format!("Unable to get db connection: {:?}", e)) + // Open an existing `NetworkDatabase` from disk. + fn open(path: &Path) -> Result { + let conn_pool = Self::open_conn_pool(path)?; + + let db = Self { + operators: HashMap::new(), + clusters: HashMap::new(), + shares: HashMap::new(), + conn_pool, + }; + Ok(db) } /// Create a `NetworkDatabase` at the given path. - pub fn create(path: &Path) -> Result { + pub fn create(path: &Path) -> Result { let _file = File::options() .write(true) .read(true) .create_new(true) - .open(path) - .map_err(|e| format!("Unable to create file at path {:?}: {}", path, e))?; + .open(path)?; // restrict file permissions let conn_pool = Self::open_conn_pool(path)?; - let conn = conn_pool - .get() - .map_err(|e| format!("Unable to get connection to the database: {:?}", e))?; - - // Operator table - conn.execute( - "CREATE TABLE operators ( - operator_id INTEGER PRIMARY KEY, - public_key TEXT NOT NULL, - owner_address TEXT NOT NULL, - UNIQUE (public_key) - )", - params![], - ) - .map_err(|e| format!("Unable to create operators table in database: {:?}", e))?; - - // Create clusters table - another parent table with no dependencies - conn.execute( - "CREATE TABLE clusters ( - cluster_id INTEGER PRIMARY KEY, - faulty INTEGER NOT NULL, - liquidated BOOLEAN DEFAULT FALSE - )", - params![], - ) - .map_err(|e| format!("Unable to create clusters table: {:?}", e))?; - - // Create cluster_members table - depends on both operators and clusters - conn.execute( - "CREATE TABLE cluster_members ( - cluster_id INTEGER NOT NULL, - operator_id INTEGER NOT NULL, - PRIMARY KEY (cluster_id, operator_id), - FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE, - FOREIGN KEY (operator_id) REFERENCES operators(operator_id) ON DELETE CASCADE - )", - params![], - ) - .map_err(|e| format!("Unable to create cluster_members table: {:?}", e))?; - - // Create validators table - depends on clusters - conn.execute( - "CREATE TABLE validators ( - validator_pubkey TEXT PRIMARY KEY, - cluster_id INTEGER NOT NULL, - fee_recipient TEXT, - graffiti BLOB, - validator_index INTEGER, - last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE - )", - params![], - ) - .map_err(|e| format!("Unable to create validators table: {:?}", e))?; - - // Create shares table - depends on validators and cluster_members - conn.execute( - "CREATE TABLE shares ( - validator_pubkey TEXT NOT NULL, - cluster_id INTEGER NOT NULL, - operator_id INTEGER NOT NULL, - share_pubkey TEXT, - PRIMARY KEY (validator_pubkey, operator_id), - FOREIGN KEY (cluster_id, operator_id) REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, - FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE - )", - params![], - ).map_err(|e| format!("Unable to create shares table: {:?}", e))?; + let conn = conn_pool.get()?; + + // create all of the tables + conn.execute_batch(include_str!("table_schema.sql"))?; + + // populate stores + // todo!() Ok(Self { operators: HashMap::new(), @@ -140,44 +93,35 @@ impl NetworkDatabase { }) } - /// Open an existing `NetworkDatabase` from disk. 
- pub fn open(path: &Path) -> Result { - let conn_pool = Self::open_conn_pool(path)?; - - // Populate all in memory data w/ db connection - let operators = Self::populate_operators(&conn_pool); - let shares = Self::populate_shares(&conn_pool, &operators); - - let db = Self { - operators, - clusters: HashMap::new(), - shares: HashMap::new(), - conn_pool, - }; - Ok(db) + // Open a new connection + fn connection(&self) -> Result, DatabaseError> { + Ok(self.conn_pool.get()?) } - // populate in memory share store - fn populate_shares( - _conn: &Pool, - _operators: &HashMap, - ) -> HashMap { + // Populate in memory share store with the shares that this operator owns + fn populate_shares(_conn: &Pool) -> HashMap { todo!() } - // populate in memory operator store w/ existing database entries + // Populate the in memory operator store with all of the operators in the network fn populate_operators(_conn: &Pool) -> HashMap { todo!() } - fn open_conn_pool(path: &Path) -> Result { + // Populate the in memory cluster store with all of the clusters that this operator is a + // member of + fn populate_clusters(_conn: &Pool) -> HashMap { + todo!() + } + + /// Build a new connection pool + fn open_conn_pool(path: &Path) -> Result { let manager = SqliteConnectionManager::file(path); // some other args here let conn_pool = Pool::builder() .max_size(POOL_SIZE) .connection_timeout(CONNECTION_TIMEOUT) - .build(manager) - .map_err(|e| format!("Unable to open database: {:?}", e))?; + .build(manager)?; Ok(conn_pool) } } @@ -185,11 +129,13 @@ impl NetworkDatabase { #[cfg(test)] mod database_test { use super::*; + use tempfile::tempdir; #[test] fn test_create_database() { - let path = Path::new("db"); - let db = NetworkDatabase::open_or_create(path); + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let db = NetworkDatabase::open_or_create(&file); assert!(db.is_ok()); } } diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 1587c3f7..ff75ecce 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -1,4 +1,4 @@ -use super::NetworkDatabase; +use super::{DatabaseError, NetworkDatabase}; use rsa::pkcs8::{EncodePublicKey, LineEnding}; use rsa::RsaPublicKey; use rusqlite::params; @@ -7,22 +7,21 @@ use ssv_types::{Operator, OperatorId}; /// Implements all operator related functionality on the database impl NetworkDatabase { /// Insert a new operator into the database - pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), String> { + pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), DatabaseError> { // make sure that this operator does not already exist if self.operators.contains_key(&operator.id) { return Ok(()); } - let conn = self.connection()?; // encode data and insert into database let encoded_pubkey = Self::encode_pubkey(&operator.rsa_pubkey); let converted_address = operator.owner.to_string(); + conn.execute( "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", params![*operator.id, encoded_pubkey, converted_address], - ) - .map_err(|e| format!("Failed to insert operator: {:?}", e))?; + )?; // then, store in memory self.operators.insert(operator.id, operator.clone()); @@ -30,7 +29,7 @@ impl NetworkDatabase { } /// Delete an operator - pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), String> { + pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), DatabaseError> { // make sure that it exists if 
!self.operators.contains_key(&id) { return Ok(()); @@ -38,8 +37,7 @@ impl NetworkDatabase { // Remove from db and in memory let conn = self.connection()?; - conn.execute("DELETE FROM operators WHERE operator_id = ?1", params![*id]) - .map_err(|e| format!("Failed to delete operator: {:?}", e))?; + conn.execute("DELETE FROM operators WHERE operator_id = ?1", params![*id])?; self.operators.remove(&id); Ok(()) } diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs index 7aa41346..a6ddd8fc 100644 --- a/anchor/database/src/share_operations.rs +++ b/anchor/database/src/share_operations.rs @@ -1,4 +1,4 @@ -use super::NetworkDatabase; +use super::{DatabaseError, NetworkDatabase}; use rusqlite::{params, Transaction}; use ssv_types::{ClusterId, OperatorId, Share}; use types::PublicKey; @@ -9,12 +9,10 @@ impl NetworkDatabase { &mut self, tx: &Transaction<'_>, share: &Share, - cluster_id: &ClusterId, - operator_id: &OperatorId, + cluster_id: ClusterId, + operator_id: OperatorId, validator_pubkey: &PublicKey, - ) -> Result<(), String> { - let cluster_id_i64: i64 = cluster_id.0 as i64; - let operator_id_i64: i64 = operator_id.0 as i64; + ) -> Result<(), DatabaseError> { tx.execute( "INSERT INTO shares ( validator_pubkey, @@ -24,12 +22,11 @@ impl NetworkDatabase { ) VALUES (?1, ?2, ?3, ?4)", params![ validator_pubkey.to_string(), - cluster_id_i64, - operator_id_i64, + *cluster_id, + *operator_id, share.share_pubkey.to_string(), ], - ) - .map_err(|e| format!("Failed to insert share: {:?}", e))?; + )?; Ok(()) } diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql new file mode 100644 index 00000000..4b141fa1 --- /dev/null +++ b/anchor/database/src/table_schema.sql @@ -0,0 +1,41 @@ +CREATE TABLE operators ( + operator_id INTEGER PRIMARY KEY, + public_key TEXT NOT NULL, + owner_address TEXT NOT NULL, + UNIQUE (public_key) +); + +CREATE TABLE clusters ( + cluster_id INTEGER PRIMARY KEY, + faulty INTEGER NOT NULL, + liquidated BOOLEAN DEFAULT FALSE +); + +CREATE TABLE cluster_members ( + cluster_id INTEGER NOT NULL, + operator_id INTEGER NOT NULL, + PRIMARY KEY (cluster_id, operator_id), + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE, + FOREIGN KEY (operator_id) REFERENCES operators(operator_id) ON DELETE CASCADE +); + +CREATE TABLE validators ( + validator_pubkey TEXT PRIMARY KEY, + cluster_id INTEGER NOT NULL, + fee_recipient TEXT, + graffiti BLOB, + validator_index INTEGER, + last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE +); + +CREATE TABLE shares ( + validator_pubkey TEXT NOT NULL, + cluster_id INTEGER NOT NULL, + operator_id INTEGER NOT NULL, + share_pubkey TEXT, + PRIMARY KEY (validator_pubkey, operator_id), + FOREIGN KEY (cluster_id, operator_id) REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, + FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE +); + diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 6079a24b..7e8368a8 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -1,12 +1,12 @@ -use crate::NetworkDatabase; +use crate::{NetworkDatabase, DatabaseError}; /// Implements all validator related db functionality impl NetworkDatabase { - pub fn insert_validator(&mut self) -> Result<(), String> { + pub fn insert_validator(&mut self) -> Result<(), 
DatabaseError> { todo!() } - pub fn delete_validator(&mut self) -> Result<(), String> { + pub fn delete_validator(&mut self) -> Result<(), DatabaseError> { todo!() } From 1ac1b163d3bfdbba58171d651e33318f0aaf9b43 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 6 Dec 2024 15:10:41 +0000 Subject: [PATCH 06/50] more testing utils, cluster deletion cascade test passing --- anchor/database/src/cluster_operations.rs | 68 +++++++++- anchor/database/src/lib.rs | 36 ++--- anchor/database/src/test_utils/mod.rs | 143 ++++++++++++++++++++ anchor/database/src/validator_operations.rs | 2 +- 4 files changed, 220 insertions(+), 29 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 63685b6a..cb165fc0 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -10,13 +10,11 @@ impl NetworkDatabase { let mut conn = self.connection()?; let tx = conn.transaction()?; - // Insert the top level cluster data + // Insert the top level cluster data and associated validator metadata tx.execute( "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", params![*cluster.cluster_id, 0], )?; - - // Insert the validator metadata for the cluster tx.execute( "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", params![ @@ -35,8 +33,7 @@ impl NetworkDatabase { // Commit all operators to the db tx.commit()?; - // Since we have committed, we can now store everything in memory and know it will be - // consistent + // Since we have successfully committed, we can now store everything in memory self.clusters.insert(cluster.cluster_id, cluster.clone()); for member in cluster.cluster_members { let key = member.share.share_pubkey.clone(); @@ -68,11 +65,28 @@ impl NetworkDatabase { validator_pubkey, )?; } + Ok(()) + } + + /// Delete a cluster from the database. 
This will cascade and delete all corresponding cluster
+    /// members, shares, and validator metadata
+    /// This corresponds to a validator being removed or exiting
+    pub fn delete_cluster(&mut self, id: ClusterId) -> Result<(), DatabaseError> {
+        // make sure this cluster exists
+        if !self.clusters.contains_key(&id) {
+            return Ok(());
+        }
+
+        let conn = self.connection()?;
+        conn.execute("DELETE FROM clusters WHERE cluster_id = ?1", params![*id])?;
+        // remove all in memory stores: todo!() need to figure out exactly how to structure in
+        // memory
+        let cluster = self.clusters.remove(&id);
         Ok(())
     }

-    // Fetch a cluster that we are in
+    /// Fetch a cluster
     pub fn get_cluster(&self, id: &ClusterId) -> Option<Cluster> {
         self.clusters.get(id).cloned()
     }
@@ -82,7 +96,8 @@ mod cluster_database_tests {
     use super::*;
     use crate::test_utils::{
-        dummy_cluster, dummy_operator, get_cluster_from_db, get_cluster_member_from_db,
+        db_with_cluster, dummy_cluster, dummy_operator, get_cluster_from_db,
+        get_cluster_member_from_db, get_shares_from_db, get_validator_from_db,
     };
     use tempfile::tempdir;
@@ -136,9 +151,16 @@ mod cluster_database_tests {
         }

         // Verify that the shares are in the database
+        let all_shares = get_shares_from_db(&db, cluster.cluster_id);
+        assert!(!all_shares.is_empty());
+
+        // Verify that the validator is in the database
+        let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string();
+        assert!(get_validator_from_db(&db, &validator_pubkey_str).is_some());
     }

     #[test]
+    /// Try inserting a cluster that does not already have registered operators in the database
     fn test_insert_cluster_without_operators() {
         // Create a temporary database
         let dir = tempdir().unwrap();
@@ -151,4 +173,36 @@ mod cluster_database_tests {
         // This should fail because the operators don't exist in the database
         assert!(db.insert_cluster(cluster).is_err());
     }
+
+    #[test]
+    fn test_delete_cluster() {
+        // Create a temporary database
+        let dir = tempdir().unwrap();
+        let file = dir.path().join("db.sqlite");
+        let mut db = NetworkDatabase::create(&file).unwrap();
+
+        // populate the db with operators and cluster
+        let cluster = db_with_cluster(&mut db);
+
+        // Delete the cluster and then confirm it is gone from memory and the db
+        assert!(db.delete_cluster(cluster.cluster_id).is_ok());
+
+        let cluster_row = get_cluster_from_db(&db, cluster.cluster_id);
+        assert!(db.get_cluster(&cluster.cluster_id).is_none());
+        assert!(cluster_row.is_none());
+
+        // Make sure all the members are gone
+        for member in &cluster.cluster_members {
+            let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id);
+            assert!(member_row.is_none());
+        }
+
+        // Make sure all the shares are gone
+        let all_shares = get_shares_from_db(&db, cluster.cluster_id);
+        assert!(all_shares.is_empty());
+
+        // Make sure the validator this cluster represented is gone
+        let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string();
+        assert!(get_validator_from_db(&db, &validator_pubkey_str).is_none());
+    }
 }
diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index f9245059..fc0bd85a 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -13,17 +13,10 @@ mod operator_operations;
 mod share_operations;
 mod validator_operations;

-pub use crate::error::DatabaseError;
-
 #[cfg(test)]
 pub mod test_utils;

-// Todo
-// 1) Decide on the types I want to use
-// 2) Rebuilding after restart
-// 3) Validator logic
-// 4) To/From sql for all the types
-// 5) Test
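One SQLite detail the cascade test above depends on: foreign-key enforcement, and with it ON DELETE CASCADE, ships disabled and must be enabled per connection. A sketch of one way to wire that into the r2d2 pool used here; `with_init` is a real r2d2_sqlite hook, but attaching the pragma this way is an assumption about how `open_conn_pool` could be extended, not something this patch does:

use r2d2_sqlite::SqliteConnectionManager;
use std::path::Path;

// Each new pooled connection runs the init closure once, so cascading
// deletes behave the same no matter which connection serves the query.
fn pool_with_cascades(path: &Path) -> Result<r2d2::Pool<SqliteConnectionManager>, r2d2::Error> {
    let manager = SqliteConnectionManager::file(path)
        .with_init(|conn| conn.execute_batch("PRAGMA foreign_keys = ON"));
    r2d2::Pool::builder().max_size(1).build(manager)
}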
+pub use crate::error::DatabaseError;

 type Pool = r2d2::Pool<SqliteConnectionManager>;
@@ -58,6 +51,8 @@ impl NetworkDatabase {
     fn open(path: &Path) -> Result<Self, DatabaseError> {
         let conn_pool = Self::open_conn_pool(path)?;

+        // todo!(): populate in memory stores
+
         let db = Self {
             operators: HashMap::new(),
             clusters: HashMap::new(),
@@ -82,8 +77,7 @@ impl NetworkDatabase {
         // create all of the tables
         conn.execute_batch(include_str!("table_schema.sql"))?;

-        // populate stores
-        // todo!()
+        // todo!() populate in memory stores

         Ok(Self {
             operators: HashMap::new(),
@@ -93,6 +87,17 @@ impl NetworkDatabase {
         })
     }

+    /// Build a new connection pool
+    fn open_conn_pool(path: &Path) -> Result<Pool, DatabaseError> {
+        let manager = SqliteConnectionManager::file(path);
+        // some other args here
+        let conn_pool = Pool::builder()
+            .max_size(POOL_SIZE)
+            .connection_timeout(CONNECTION_TIMEOUT)
+            .build(manager)?;
+        Ok(conn_pool)
+    }
+
     // Open a new connection
     fn connection(&self) -> Result<PooledConnection<SqliteConnectionManager>, DatabaseError> {
         Ok(self.conn_pool.get()?)
@@ -113,17 +118,6 @@ impl NetworkDatabase {
     fn populate_clusters(_conn: &Pool) -> HashMap<ClusterId, Cluster> {
         todo!()
     }
-
-    /// Build a new connection pool
-    fn open_conn_pool(path: &Path) -> Result<Pool, DatabaseError> {
-        let manager = SqliteConnectionManager::file(path);
-        // some other args here
-        let conn_pool = Pool::builder()
-            .max_size(POOL_SIZE)
-            .connection_timeout(CONNECTION_TIMEOUT)
-            .build(manager)?;
-        Ok(conn_pool)
-    }
 }

 #[cfg(test)]
 mod database_test {
     use super::*;
diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/test_utils/mod.rs
index f7de9be7..b188d608 100644
--- a/anchor/database/src/test_utils/mod.rs
+++ b/anchor/database/src/test_utils/mod.rs
@@ -72,6 +72,19 @@ pub fn dummy_validator_metadata() -> ValidatorMetadata {
     }
 }

+// Construct a mock database with a cluster
+pub fn db_with_cluster(db: &mut NetworkDatabase) -> Cluster {
+    for i in 0..4 {
+        let operator = dummy_operator(i);
+        db.insert_operator(&operator).unwrap();
+    }
+
+    // Insert a dummy cluster
+    let cluster = dummy_cluster(4);
+    db.insert_cluster(cluster.clone()).unwrap();
+    cluster
+}
+
 // Get an Operator from the database
 pub fn get_operator_from_db(db: &NetworkDatabase, id: OperatorId) -> Option<Operator> {
     let conn = db.connection().unwrap();
@@ -107,6 +120,50 @@ pub fn get_cluster_from_db(db: &NetworkDatabase, id: ClusterId) -> Option<(i64,
     cluster_row
 }

+// Get all of the shares for a cluster
+pub fn get_shares_from_db(
+    db: &NetworkDatabase,
+    cluster_id: ClusterId,
+) -> Vec<(String, i64, i64, Option<String>)> {
+    let conn = db.connection().unwrap();
+    let mut stmt = conn
+        .prepare("SELECT validator_pubkey, cluster_id, operator_id, share_pubkey FROM shares WHERE cluster_id = ?1")
+        .unwrap();
+    let shares = stmt
+        .query_map(params![*cluster_id], |row| {
+            Ok((
+                row.get(0).unwrap(),
+                row.get(1).unwrap(),
+                row.get(2).unwrap(),
+                row.get(3).unwrap(),
+            ))
+        })
+        .unwrap()
+        .map(|r| r.unwrap())
+        .collect();
+    shares
+}
+
+// Get validator metadata from the database
+pub fn get_validator_from_db(
+    db: &NetworkDatabase,
+    pubkey: &str,
+) -> Option<(String, i64)> {
+    let conn = db.connection().unwrap();
+    let mut stmt = conn
+        .prepare("SELECT validator_pubkey, cluster_id FROM validators WHERE validator_pubkey = ?1")
+        .unwrap();
+    stmt.query_row(params![pubkey], |row| {
+        Ok((
+            row.get(0)?,
+            row.get(1)?,
+        ))
+    })
+    .optional()
+    .unwrap()
+}
+
 // Get a ClusterMember from the database
 pub fn get_cluster_member_from_db(
     db: &NetworkDatabase,
@@ -123,3 +180,89 @@ pub fn get_cluster_member_from_db(
         .unwrap();
     member_row
 }
+
+// Debug print the entire database.
For testing purposes +pub fn debug_print_db(db: &NetworkDatabase) { + let conn = db.connection().unwrap(); + + println!("\n=== CLUSTERS ==="); + let mut stmt = conn.prepare("SELECT * FROM clusters").unwrap(); + let clusters = stmt + .query_map([], |row| { + Ok(format!( + "Cluster ID: {}, Faulty: {}, Liquidated: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, bool>(2).unwrap() + )) + }) + .unwrap(); + for cluster in clusters { + println!("{}", cluster.unwrap()); + } + + println!("\n=== OPERATORS ==="); + let mut stmt = conn.prepare("SELECT * FROM operators").unwrap(); + let operators = stmt + .query_map([], |row| { + Ok(format!( + "Operator ID: {}, PublicKey: {}, Owner: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, String>(1).unwrap(), + row.get::<_, String>(2).unwrap() + )) + }) + .unwrap(); + for operator in operators { + println!("{}", operator.unwrap()); + } + + println!("\n=== CLUSTER MEMBERS ==="); + let mut stmt = conn.prepare("SELECT * FROM cluster_members").unwrap(); + let members = stmt + .query_map([], |row| { + Ok(format!( + "Cluster ID: {}, Operator ID: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, i64>(1).unwrap() + )) + }) + .unwrap(); + for member in members { + println!("{}", member.unwrap()); + } + + println!("\n=== VALIDATORS ==="); + let mut stmt = conn.prepare("SELECT * FROM validators").unwrap(); + let validators = stmt + .query_map([], |row| { + Ok(format!( + "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Index: {:?}", + row.get::<_, String>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, Option>(2).unwrap(), + row.get::<_, Option>(3).unwrap() + )) + }) + .unwrap(); + for validator in validators { + println!("{}", validator.unwrap()); + } + + println!("\n=== SHARES ==="); + let mut stmt = conn.prepare("SELECT * FROM shares").unwrap(); + let shares = stmt + .query_map([], |row| { + Ok(format!( + "Validator Pubkey: {}, Cluster ID: {}, Operator ID: {}, Share Pubkey: {:?}", + row.get::<_, String>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, i64>(2).unwrap(), + row.get::<_, Option>(3).unwrap() + )) + }) + .unwrap(); + for share in shares { + println!("{}", share.unwrap()); + } +} diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 7e8368a8..6619c864 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -1,4 +1,4 @@ -use crate::{NetworkDatabase, DatabaseError}; +use crate::{DatabaseError, NetworkDatabase}; /// Implements all validator related db functionality impl NetworkDatabase { From 5bddc0bffbf1e182d4b7c80708f8e55edf57f026 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 6 Dec 2024 16:43:26 +0000 Subject: [PATCH 07/50] potential memory stores --- anchor/common/ssv_types/src/lib.rs | 2 +- anchor/database/src/cluster_operations.rs | 44 +++++++++++----------- anchor/database/src/lib.rs | 24 ++++++++---- anchor/database/src/operator_operations.rs | 2 +- anchor/database/src/share_operations.rs | 10 ----- anchor/database/src/test_utils/mod.rs | 16 ++------ 6 files changed, 45 insertions(+), 53 deletions(-) diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 7f26161b..6d25f44d 100644 --- a/anchor/common/ssv_types/src/lib.rs +++ b/anchor/common/ssv_types/src/lib.rs @@ -1,4 +1,4 @@ -pub use cluster::{Cluster, ClusterId, ClusterMember, ValidatorMetadata, ValidatorIndex}; +pub use cluster::{Cluster, ClusterId, ClusterMember, ValidatorIndex, 
ValidatorMetadata}; pub use operator::{Operator, OperatorId}; pub use share::Share; mod cluster; diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index cb165fc0..34998601 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,6 +1,7 @@ use crate::{DatabaseError, NetworkDatabase}; use rusqlite::{params, Transaction}; use ssv_types::{Cluster, ClusterId, ClusterMember}; +use std::collections::{HashMap, HashSet}; use types::PublicKey; /// Implements all cluster related functionality on the database @@ -34,10 +35,21 @@ impl NetworkDatabase { tx.commit()?; // Since we have successfully committed, we can now store everything in memory - self.clusters.insert(cluster.cluster_id, cluster.clone()); + self.clusters.insert(cluster.cluster_id); + self.validator_metadata + .insert(cluster.cluster_id, cluster.validator_metadata); for member in cluster.cluster_members { - let key = member.share.share_pubkey.clone(); - self.shares.insert(key, member.share); + // Insert the share for this member + self.shares + .entry(cluster.cluster_id) + .or_insert_with(HashMap::new) + .insert(member.operator_id, member.share.clone()); + + // Insert the operators in this committee + self.cluster_members + .entry(cluster.cluster_id) + .or_insert_with(HashSet::new) + .insert(member.operator_id); } Ok(()) } @@ -73,7 +85,7 @@ impl NetworkDatabase { /// This corresponds to a validator being removed or exiting pub fn delete_cluster(&mut self, id: ClusterId) -> Result<(), DatabaseError> { // make sure this cluster exists - if !self.clusters.contains_key(&id) { + if !self.clusters.contains(&id) { return Ok(()); } @@ -86,9 +98,9 @@ impl NetworkDatabase { Ok(()) } - /// Fetch a cluster - pub fn get_cluster(&self, id: &ClusterId) -> Option { - self.clusters.get(id).cloned() + /// Check if this cluster exists + pub fn cluster_exists(&self, id: &ClusterId) -> bool { + self.clusters.contains(id) } } @@ -119,19 +131,9 @@ mod cluster_database_tests { let cluster = dummy_cluster(4); assert!(db.insert_cluster(cluster.clone()).is_ok()); - // Verify cluster can be retrieved from memory - let retrieved = db.get_cluster(&cluster.cluster_id); - assert!(retrieved.is_some()); - - // Check to make sure the data is expected - let retrieved = retrieved.unwrap(); - assert_eq!(retrieved.cluster_id, cluster.cluster_id); - assert_eq!( - retrieved.cluster_members.len(), - cluster.cluster_members.len() - ); - assert_eq!(retrieved.faulty, cluster.faulty); - assert_eq!(retrieved.liquidated, cluster.liquidated); + // Verify cluster is in memory + assert!(db.cluster_exists(&cluster.cluster_id)); + assert_eq!(db.cluster_members.len(), cluster.cluster_members.len()); // Verify cluster is in the underlying database let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); @@ -188,7 +190,7 @@ mod cluster_database_tests { assert!(db.delete_cluster(cluster.cluster_id).is_ok()); let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); - assert!(db.get_cluster(&cluster.cluster_id).is_none()); + assert!(!db.cluster_exists(&cluster.cluster_id)); assert!(cluster_row.is_none()); // Make sure all the members are gone diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index fc0bd85a..6fc2d646 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,7 +1,7 @@ use r2d2_sqlite::SqliteConnectionManager; -use ssv_types::{Cluster, ClusterId}; +use ssv_types::{Cluster, ClusterId, ValidatorMetadata}; use ssv_types::{Operator, 
OperatorId, Share};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use std::fs::File;
 use std::path::Path;
 use std::time::Duration;
@@ -29,10 +29,14 @@ pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);
 pub struct NetworkDatabase {
     /// All of the operators in the network
     operators: HashMap<OperatorId, Operator>,
-    /// The clusters that this operator is a member in
-    clusters: HashMap<ClusterId, Cluster>,
-    /// The shares that this operator is responsible for
-    shares: HashMap<PublicKey, Share>,
+    /// All of the clusters in the network
+    clusters: HashSet<ClusterId>,
+    /// Mapping of a cluster ID to its relevant Validator metadata
+    validator_metadata: HashMap<ClusterId, ValidatorMetadata>,
+    /// Double layer share map from Cluster => Operator => Share
+    shares: HashMap<ClusterId, HashMap<OperatorId, Share>>,
+    /// Maps a ClusterID to the operators in its cluster
+    cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
     /// Connection to the database
     conn_pool: Pool,
 }
@@ -55,8 +59,10 @@ impl NetworkDatabase {
         let db = Self {
             operators: HashMap::new(),
-            clusters: HashMap::new(),
+            clusters: HashSet::new(),
+            validator_metadata: HashMap::new(),
             shares: HashMap::new(),
+            cluster_members: HashMap::new(),
             conn_pool,
         };
         Ok(db)
@@ -81,8 +87,10 @@ impl NetworkDatabase {
         Ok(Self {
             operators: HashMap::new(),
-            clusters: HashMap::new(),
+            clusters: HashSet::new(),
+            validator_metadata: HashMap::new(),
             shares: HashMap::new(),
+            cluster_members: HashMap::new(),
             conn_pool,
         })
     }
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index ff75ecce..d42ef217 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -62,7 +62,7 @@ impl NetworkDatabase {
 }

 #[cfg(test)]
-pub(crate) mod operator_database_tests {
+mod operator_database_tests {
     use super::*;
     use crate::test_utils::{dummy_operator, get_operator_from_db};
     use tempfile::tempdir;
diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs
index a6ddd8fc..4b2b395b 100644
--- a/anchor/database/src/share_operations.rs
+++ b/anchor/database/src/share_operations.rs
@@ -30,14 +30,4 @@ impl NetworkDatabase {
         Ok(())
     }
-
-    /// Get the share owned by the operator
-    pub fn get_share(&self, share_pubkey: &PublicKey) -> Option<Share> {
-        self.shares.get(share_pubkey).cloned()
-    }
-
-    /// Check to see if our operator owns this share
-    pub fn operator_owns(&self, share_pubkey: &PublicKey) -> bool {
-        self.shares.contains_key(share_pubkey)
-    }
 }
diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/test_utils/mod.rs
index b188d608..f589dfef 100644
--- a/anchor/database/src/test_utils/mod.rs
+++ b/anchor/database/src/test_utils/mod.rs
@@ -146,22 +146,14 @@ pub fn get_shares_from_db(
 }

 // Get validator metadata from the database
-pub fn get_validator_from_db(
-    db: &NetworkDatabase,
-    pubkey: &str,
-) -> Option<(String, i64)> {
+pub fn get_validator_from_db(db: &NetworkDatabase, pubkey: &str) -> Option<(String, i64)> {
     let conn = db.connection().unwrap();
     let mut stmt = conn
         .prepare("SELECT validator_pubkey, cluster_id FROM validators WHERE validator_pubkey = ?1")
         .unwrap();
-    stmt.query_row(params![pubkey], |row| {
-        Ok((
-            row.get(0)?,
-            row.get(1)?,
-        ))
-    })
-    .optional()
-    .unwrap()
+    stmt.query_row(params![pubkey], |row| Ok((row.get(0)?, row.get(1)?)))
+        .optional()
+        .unwrap()
 }

 // Get a ClusterMember from the database

From ed67bd5a105b257a39b89cfe913ddfb2f3b126a6 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Fri, 6 Dec 2024 17:40:05 +0000
Subject: [PATCH 08/50] top level SQL statement defs with prepare cached

---
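For context on the technique this commit introduces: rusqlite's `prepare_cached` compiles a statement once per connection and returns the cached handle on later calls with the same SQL string, so the statements kept in the static map are parsed only the first time each connection uses them. A minimal sketch with an illustrative call site, not the repository's wrapper:

use rusqlite::{params, Connection};

// Later calls with the same SQL fetch the ready statement from the
// connection's internal cache, so hot insert paths skip re-parsing.
fn add_operator(conn: &Connection, id: i64, pubkey: &str, owner: &str) -> rusqlite::Result<()> {
    conn.prepare_cached(
        "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)",
    )?
    .execute(params![id, pubkey, owner])?;
    Ok(())
}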
anchor/database/src/cluster_operations.rs | 53 +++++++++++----------- anchor/database/src/lib.rs | 44 ++++++++++++++++++ anchor/database/src/operator_operations.rs | 21 ++++----- anchor/database/src/share_operations.rs | 15 ++---- 4 files changed, 84 insertions(+), 49 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 34998601..e510ff53 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,4 +1,4 @@ -use crate::{DatabaseError, NetworkDatabase}; +use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::{params, Transaction}; use ssv_types::{Cluster, ClusterId, ClusterMember}; use std::collections::{HashMap, HashSet}; @@ -12,17 +12,13 @@ impl NetworkDatabase { let tx = conn.transaction()?; // Insert the top level cluster data and associated validator metadata - tx.execute( - "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", - params![*cluster.cluster_id, 0], - )?; - tx.execute( - "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", - params![ + tx.prepare_cached(SQL[&SqlStatement::InsertCluster])? + .execute(params![*cluster.cluster_id, 0])?; + tx.prepare_cached(SQL[&SqlStatement::InsertValidator])? + .execute(params![ cluster.validator_metadata.validator_pubkey.to_string(), *cluster.cluster_id - ], - )?; + ])?; // Now, insert all the cluster members self.insert_cluster_members( @@ -38,19 +34,20 @@ impl NetworkDatabase { self.clusters.insert(cluster.cluster_id); self.validator_metadata .insert(cluster.cluster_id, cluster.validator_metadata); + + let mut shares = HashMap::with_capacity(cluster.cluster_members.len()); + let mut members = HashSet::with_capacity(cluster.cluster_members.len()); + + // Process all members in a single iteration for member in cluster.cluster_members { - // Insert the share for this member - self.shares - .entry(cluster.cluster_id) - .or_insert_with(HashMap::new) - .insert(member.operator_id, member.share.clone()); - - // Insert the operators in this committee - self.cluster_members - .entry(cluster.cluster_id) - .or_insert_with(HashSet::new) - .insert(member.operator_id); + shares.insert(member.operator_id, member.share); + members.insert(member.operator_id); } + + // Bulk insert the processed data + self.shares.insert(cluster.cluster_id, shares); + self.cluster_members.insert(cluster.cluster_id, members); + Ok(()) } @@ -63,10 +60,8 @@ impl NetworkDatabase { ) -> Result<(), DatabaseError> { for member in cluster_members { // insert the member - tx.execute( - "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", - params![*member.cluster_id, *member.operator_id], - )?; + tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? + .execute(params![*member.cluster_id, *member.operator_id])?; // insert the members share self.insert_share( @@ -90,7 +85,8 @@ impl NetworkDatabase { } let conn = self.connection()?; - conn.execute("DELETE FROM clusters WHERE cluster_id = ?1", params![*id])?; + conn.prepare_cached(SQL[&SqlStatement::DeleteCluster])? 
+ .execute(params![*id])?; // remove all in memory stores: todo!() need to figure out exactly how to structure in // memory @@ -133,7 +129,10 @@ mod cluster_database_tests { // Verify cluster is in memory assert!(db.cluster_exists(&cluster.cluster_id)); - assert_eq!(db.cluster_members.len(), cluster.cluster_members.len()); + assert_eq!( + db.cluster_members[&cluster.cluster_id].len(), + cluster.cluster_members.len() + ); // Verify cluster is in the underlying database let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 6fc2d646..f83278af 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -4,6 +4,7 @@ use ssv_types::{Operator, OperatorId, Share}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::Path; +use std::sync::LazyLock; use std::time::Duration; use types::PublicKey; @@ -23,6 +24,49 @@ type Pool = r2d2::Pool; pub const POOL_SIZE: u32 = 1; pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] +pub(crate) enum SqlStatement { + InsertOperator, + DeleteOperator, + InsertCluster, + InsertClusterMember, + DeleteCluster, + InsertShare, + InsertValidator, +} + +pub(crate) static SQL: LazyLock> = LazyLock::new(|| { + let mut m = HashMap::new(); + m.insert( + SqlStatement::InsertOperator, + "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::DeleteOperator, + "DELETE FROM operators WHERE operator_id = ?1", + ); + m.insert( + SqlStatement::InsertCluster, + "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::InsertClusterMember, + "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::DeleteCluster, + "DELETE FROM clusters WHERE cluster_id = ?1", + ); + m.insert(SqlStatement::InsertShare, + "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey) VALUES (?1, ?2, ?3, ?4)"); + m.insert( + SqlStatement::InsertValidator, + "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", + ); + + m +}); + /// Top level NetworkDatabase that contains in memory storage to relevant information for quick /// access and a connection to the underlying database #[derive(Debug, Clone)] diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index d42ef217..dc7b2fba 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -1,4 +1,4 @@ -use super::{DatabaseError, NetworkDatabase}; +use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rsa::pkcs8::{EncodePublicKey, LineEnding}; use rsa::RsaPublicKey; use rusqlite::params; @@ -12,16 +12,14 @@ impl NetworkDatabase { if self.operators.contains_key(&operator.id) { return Ok(()); } - let conn = self.connection()?; - - // encode data and insert into database - let encoded_pubkey = Self::encode_pubkey(&operator.rsa_pubkey); - let converted_address = operator.owner.to_string(); - conn.execute( - "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", - params![*operator.id, encoded_pubkey, converted_address], - )?; + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::InsertOperator])? 
+ .execute(params![ + *operator.id, + Self::encode_pubkey(&operator.rsa_pubkey), + operator.owner.to_string() + ])?; // then, store in memory self.operators.insert(operator.id, operator.clone()); @@ -37,7 +35,8 @@ impl NetworkDatabase { // Remove from db and in memory let conn = self.connection()?; - conn.execute("DELETE FROM operators WHERE operator_id = ?1", params![*id])?; + conn.prepare_cached(SQL[&SqlStatement::DeleteOperator])? + .execute(params![*id])?; self.operators.remove(&id); Ok(()) } diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs index 4b2b395b..643c2c92 100644 --- a/anchor/database/src/share_operations.rs +++ b/anchor/database/src/share_operations.rs @@ -1,4 +1,4 @@ -use super::{DatabaseError, NetworkDatabase}; +use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::{params, Transaction}; use ssv_types::{ClusterId, OperatorId, Share}; use types::PublicKey; @@ -13,20 +13,13 @@ impl NetworkDatabase { operator_id: OperatorId, validator_pubkey: &PublicKey, ) -> Result<(), DatabaseError> { - tx.execute( - "INSERT INTO shares ( - validator_pubkey, - cluster_id, - operator_id, - share_pubkey - ) VALUES (?1, ?2, ?3, ?4)", - params![ + tx.prepare_cached(SQL[&SqlStatement::InsertShare])? + .execute(params![ validator_pubkey.to_string(), *cluster_id, *operator_id, share.share_pubkey.to_string(), - ], - )?; + ])?; Ok(()) } From d3469779952262e64c9015915228c14441cbe4da Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 6 Dec 2024 17:48:49 +0000 Subject: [PATCH 09/50] simplify member insertion --- anchor/database/src/cluster_operations.rs | 50 ++++++++--------------- 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index e510ff53..b9e8b1db 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,8 +1,7 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; -use rusqlite::{params, Transaction}; -use ssv_types::{Cluster, ClusterId, ClusterMember}; +use rusqlite::params; +use ssv_types::{Cluster, ClusterId}; use std::collections::{HashMap, HashSet}; -use types::PublicKey; /// Implements all cluster related functionality on the database impl NetworkDatabase { @@ -20,12 +19,21 @@ impl NetworkDatabase { *cluster.cluster_id ])?; - // Now, insert all the cluster members - self.insert_cluster_members( - &tx, - &cluster.cluster_members, - &cluster.validator_metadata.validator_pubkey, - )?; + // Insert all of the members + cluster.cluster_members.iter().try_for_each(|member| { + // insert the member + tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? + .execute(params![*member.cluster_id, *member.operator_id])?; + + // insert the members share + self.insert_share( + &tx, + &member.share, + member.cluster_id, + member.operator_id, + &cluster.validator_metadata.validator_pubkey, + ) + })?; // Commit all operators to the db tx.commit()?; @@ -51,30 +59,6 @@ impl NetworkDatabase { Ok(()) } - // Helper to insert all of the cluster members - fn insert_cluster_members( - &mut self, - tx: &Transaction<'_>, - cluster_members: &Vec, - validator_pubkey: &PublicKey, - ) -> Result<(), DatabaseError> { - for member in cluster_members { - // insert the member - tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? 
-            .execute(params![*member.cluster_id, *member.operator_id])?;
-
-            // insert the members share
-            self.insert_share(
-                tx,
-                &member.share,
-                member.cluster_id,
-                member.operator_id,
-                validator_pubkey,
-            )?;
-        }
-        Ok(())
-    }
-
     /// Delete a cluster from the database. This will cascade and delete all corresponding cluster
     /// members, shares, and validator metadata
     /// This corresponds to a validator being removed or exiting

From 01fee71dd7567138775c0fc1492083c896d7053a Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Fri, 6 Dec 2024 21:02:41 +0000
Subject: [PATCH 10/50] flesh out some helpers

---
 anchor/database/src/cluster_operations.rs   | 48 +++++--
 anchor/database/src/error.rs                |  2 +
 anchor/database/src/lib.rs                  | 134 +++++++++++---------
 anchor/database/src/operator_operations.rs  | 17 ++-
 anchor/database/src/share_operations.rs     |  1 -
 anchor/database/src/table_schema.sql        |  2 +-
 anchor/database/src/validator_operations.rs | 40 +++++-
 7 files changed, 159 insertions(+), 85 deletions(-)

diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs
index b9e8b1db..59e6ea5b 100644
--- a/anchor/database/src/cluster_operations.rs
+++ b/anchor/database/src/cluster_operations.rs
@@ -19,13 +19,10 @@ impl NetworkDatabase {
                 *cluster.cluster_id
             ])?;

-        // Insert all of the members
+        // Insert all of the members and their shares
         cluster.cluster_members.iter().try_for_each(|member| {
-            // insert the member
             tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])?
                 .execute(params![*member.cluster_id, *member.operator_id])?;
-
-            // insert the members share
             self.insert_share(
                 &tx,
                 &member.share,
@@ -35,7 +32,7 @@ impl NetworkDatabase {
             )
         })?;

-        // Commit all operators to the db
+        // Commit all operations to the db
         tx.commit()?;

         // Since we have successfully committed, we can now store everything in memory
@@ -59,13 +56,48 @@ impl NetworkDatabase {
         Ok(())
     }

+    /// Mark the cluster as liquidated or active
+    pub fn update_status(&mut self, id: ClusterId, status: bool) -> Result<(), DatabaseError> {
+        if !self.clusters.contains(&id) {
+            return Err(DatabaseError::NotFound(format!(
+                "Cluster with id {} not in database",
+                *id
+            )));
+        }
+
+        let conn = self.connection()?;
+        conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])?
+            .execute(params![status, *id])?;
+        // todo!() change in memory status
+        Ok(())
+    }
+
+    /// Update the number of faulty nodes in the cluster
+    pub fn update_faulty(&mut self, id: ClusterId, num_faulty: u64) -> Result<(), DatabaseError> {
+        if !self.clusters.contains(&id) {
+            return Err(DatabaseError::NotFound(format!(
+                "Cluster with id {} not in database",
+                *id
+            )));
+        }
+
+        let conn = self.connection()?;
+        conn.prepare_cached(SQL[&SqlStatement::UpdateClusterFaulty])?
+            .execute(params![num_faulty, *id])?;
+        // todo!() change in memory status
+        Ok(())
+    }
+
+    /// Delete a cluster from the database.
This will cascade and delete all corresponding cluster /// members, shares, and validator metadata /// This corresponds to a validator being removed or exiting pub fn delete_cluster(&mut self, id: ClusterId) -> Result<(), DatabaseError> { - // make sure this cluster exists + // Make sure this cluster exists if !self.clusters.contains(&id) { - return Ok(()); + return Err(DatabaseError::NotFound(format!( + "Cluster with id {} not in database", + *id + ))); } let conn = self.connection()?; @@ -74,7 +106,7 @@ impl NetworkDatabase { // remove all in memory stores: todo!() need to figure out exactly how to structure in // memory - let cluster = self.clusters.remove(&id); + let _ = self.clusters.remove(&id); Ok(()) } diff --git a/anchor/database/src/error.rs b/anchor/database/src/error.rs index 66be454e..cbf8468a 100644 --- a/anchor/database/src/error.rs +++ b/anchor/database/src/error.rs @@ -4,6 +4,8 @@ use std::io::{Error as IOError, ErrorKind}; #[derive(Debug)] pub enum DatabaseError { + NotFound(String), + AlreadyPresent(String), IOError(ErrorKind), SQLError(String), SQLPoolError(String), diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index f83278af..63ca9fe4 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,12 +1,12 @@ +pub use crate::error::DatabaseError; use r2d2_sqlite::SqliteConnectionManager; -use ssv_types::{Cluster, ClusterId, ValidatorMetadata}; +use ssv_types::{ClusterId, ValidatorMetadata}; use ssv_types::{Operator, OperatorId, Share}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::Path; use std::sync::LazyLock; use std::time::Duration; -use types::PublicKey; mod cluster_operations; pub mod error; @@ -17,58 +17,12 @@ mod validator_operations; #[cfg(test)] pub mod test_utils; -pub use crate::error::DatabaseError; - type Pool = r2d2::Pool; - pub const POOL_SIZE: u32 = 1; pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); -#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] -pub(crate) enum SqlStatement { - InsertOperator, - DeleteOperator, - InsertCluster, - InsertClusterMember, - DeleteCluster, - InsertShare, - InsertValidator, -} - -pub(crate) static SQL: LazyLock> = LazyLock::new(|| { - let mut m = HashMap::new(); - m.insert( - SqlStatement::InsertOperator, - "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", - ); - m.insert( - SqlStatement::DeleteOperator, - "DELETE FROM operators WHERE operator_id = ?1", - ); - m.insert( - SqlStatement::InsertCluster, - "INSERT INTO clusters (cluster_id, faulty) VALUES (?1, ?2)", - ); - m.insert( - SqlStatement::InsertClusterMember, - "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", - ); - m.insert( - SqlStatement::DeleteCluster, - "DELETE FROM clusters WHERE cluster_id = ?1", - ); - m.insert(SqlStatement::InsertShare, - "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey) VALUES (?1, ?2, ?3, ?4)"); - m.insert( - SqlStatement::InsertValidator, - "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", - ); - - m -}); - -/// Top level NetworkDatabase that contains in memory storage to relevant information for quick -/// access and a connection to the underlying database +/// Top level NetworkDatabase that contains in memory storage for quick access +/// to relevant information and a connection to the database #[derive(Debug, Clone)] pub struct NetworkDatabase { /// All of the operators in the network @@ -154,24 +108,78 @@ impl NetworkDatabase { fn 
connection(&self) -> Result, DatabaseError> { Ok(self.conn_pool.get()?) } +} - // Populate in memory share store with the shares that this operator owns - fn populate_shares(_conn: &Pool) -> HashMap { - todo!() - } +// Wrappers around various SQL statements used for interacting with the db +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] +pub(crate) enum SqlStatement { + InsertOperator, + DeleteOperator, - // Populate the in memory operator store with all of the operators in the network - fn populate_operators(_conn: &Pool) -> HashMap { - todo!() - } + InsertCluster, + InsertClusterMember, + UpdateClusterStatus, + UpdateClusterFaulty, + DeleteCluster, - // Populate the in memory cluster store with all of the clusters that this operator is a - // member of - fn populate_clusters(_conn: &Pool) -> HashMap { - todo!() - } + InsertShare, + InsertValidator, + UpdateFeeRecipient, + SetGraffiti, + SetValidatorIndex, } +pub(crate) static SQL: LazyLock> = LazyLock::new(|| { + let mut m = HashMap::new(); + m.insert( + SqlStatement::InsertOperator, + "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::DeleteOperator, + "DELETE FROM operators WHERE operator_id = ?1", + ); + m.insert( + SqlStatement::InsertCluster, + "INSERT INTO clusters (cluster_id) VALUES (?1)", + ); + m.insert( + SqlStatement::UpdateClusterStatus, + "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::UpdateClusterFaulty, + "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::InsertClusterMember, + "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::DeleteCluster, + "DELETE FROM clusters WHERE cluster_id = ?1", + ); + m.insert(SqlStatement::InsertShare, + "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey) VALUES (?1, ?2, ?3, ?4)"); + m.insert( + SqlStatement::InsertValidator, + "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::UpdateFeeRecipient, + "UPDATE validators SET fee_recipient = ?1 WHERE validator_pubkey = ?2", + ); + m.insert( + SqlStatement::SetGraffiti, + "UPDATE validators SET graffiti = ?1 WHERE validator_pubkey = ?2", + ); + m.insert( + SqlStatement::SetValidatorIndex, + "UPDATE validators SET validator_index = ?1 WHERE validator_pubkey = ?2", + ); + m +}); + #[cfg(test)] mod database_test { use super::*; diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index dc7b2fba..288d6068 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -10,7 +10,10 @@ impl NetworkDatabase { pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), DatabaseError> { // make sure that this operator does not already exist if self.operators.contains_key(&operator.id) { - return Ok(()); + return Err(DatabaseError::NotFound(format!( + "Operator with id {} not in database", + *operator.id + ))); } let conn = self.connection()?; @@ -30,10 +33,14 @@ impl NetworkDatabase { pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), DatabaseError> { // make sure that it exists if !self.operators.contains_key(&id) { - return Ok(()); + return Err(DatabaseError::NotFound(format!( + "Operator with id {} not in database", + *id + ))); } - // Remove from db and in memory + // Remove from db and in memory. 
This should cascade to delete this operator from all of the
+        // clusters that it is in and all of the shares that it owns
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::DeleteOperator])?
             .execute(params![*id])?;
         self.operators.remove(&id);
         Ok(())
     }

     /// Get operator data from in memory store
-    pub fn get_operator(&self, id: &OperatorId) -> Option<Operator> {
-        self.operators.get(id).cloned()
+    pub fn get_operator(&self, id: &OperatorId) -> Option<&Operator> {
+        self.operators.get(id)
     }

     /// Check to see if the operator exists
diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs
index 643c2c92..11e13f76 100644
--- a/anchor/database/src/share_operations.rs
+++ b/anchor/database/src/share_operations.rs
@@ -20,7 +20,6 @@ impl NetworkDatabase {
                 *operator_id,
                 share.share_pubkey.to_string(),
             ])?;
-
         Ok(())
     }
 }
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index 4b141fa1..50e73bc8 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -7,7 +7,7 @@ CREATE TABLE operators (

 CREATE TABLE clusters (
     cluster_id INTEGER PRIMARY KEY,
-    faulty INTEGER NOT NULL,
+    faulty INTEGER DEFAULT 0,
     liquidated BOOLEAN DEFAULT FALSE
 );
diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs
index 6619c864..dba45599 100644
--- a/anchor/database/src/validator_operations.rs
+++ b/anchor/database/src/validator_operations.rs
@@ -1,16 +1,42 @@
-use crate::{DatabaseError, NetworkDatabase};
+use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL};
+use rusqlite::params;
+use ssv_types::{ClusterId, ValidatorIndex, ValidatorMetadata};
+use types::{Address, PublicKey};

 /// Implements all validator related db functionality
 impl NetworkDatabase {
-    pub fn insert_validator(&mut self) -> Result<(), DatabaseError> {
-        todo!()
+    /// Populates or updates the fee recipient for the validator
+    pub fn update_fee_recipient(
+        &mut self,
+        validator_pubkey: PublicKey,
+        fee_recipient: Address,
+    ) -> Result<(), DatabaseError> {
+        let conn = self.connection()?;
+        conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])?
+            .execute(params![
+                fee_recipient.to_string(),
+                validator_pubkey.to_string()
+            ])?;
+        Ok(())
     }

-    pub fn delete_validator(&mut self) -> Result<(), DatabaseError> {
-        todo!()
+    /// Set the index of the validator
+    pub fn set_validator_index(
+        &mut self,
+        validator_pubkey: PublicKey,
+        index: ValidatorIndex,
+    ) -> Result<(), DatabaseError> {
+        let conn = self.connection()?;
+        conn.prepare_cached(SQL[&SqlStatement::SetValidatorIndex])?
+            .execute(params![*index, validator_pubkey.to_string()])?;
+        Ok(())
     }

-    pub fn get_validator_metadata(&self) {
-        todo!()
+    /// Get the metadata for the cluster
+    pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<&ValidatorMetadata> {
+        self.validator_metadata.get(id)
     }
 }
+
+#[cfg(test)]
+mod validator_database_tests {}

From a1ca54e1bb152d6689426a2ee5e56ff4a7bb2985 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Mon, 9 Dec 2024 13:56:59 +0000
Subject: [PATCH 11/50] migrate from rsa to openssl for rsa keys

---
 Cargo.lock                                 | 35 ++--------------------
 Cargo.toml                                 |  2 +-
 anchor/common/ssv_types/Cargo.toml         |  2 +-
 anchor/common/ssv_types/src/operator.rs    | 10 +++----
 anchor/common/ssv_types/src/share.rs       |  2 --
 anchor/common/ssv_types/src/util.rs        |  8 ++---
 anchor/database/Cargo.toml                 |  4 +--
 anchor/database/src/operator_operations.rs | 13 ++++----
 anchor/database/src/table_schema.sql       |  2 +-
 anchor/database/src/test_utils/mod.rs      | 11 +++----
 10 files changed, 30 insertions(+), 59 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f94d027a..636571a9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1667,10 +1667,10 @@ name = "database"
 version = "0.1.0"
 dependencies = [
  "base64 0.22.1",
+ "openssl",
  "r2d2",
  "r2d2_sqlite",
  "rand",
- "rsa",
  "rusqlite",
  "ssv_types",
  "tempfile",
@@ -5847,17 +5847,6 @@ version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"

-[[package]]
-name = "pkcs1"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
-dependencies = [
- "der 0.7.9",
- "pkcs8 0.10.2",
- "spki 0.7.3",
-]
-
 [[package]]
 name = "pkcs8"
 version = "0.9.0"
@@ -6595,26 +6584,6 @@ dependencies = [
  "archery",
 ]

-[[package]]
-name = "rsa"
-version = "0.9.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "47c75d7c5c6b673e58bf54d8544a9f432e3a925b0e80f7cd3602ab5c50c55519"
-dependencies = [
- "const-oid",
- "digest 0.10.7",
- "num-bigint-dig",
- "num-integer",
- "num-traits",
- "pkcs1",
- "pkcs8 0.10.2",
- "rand_core",
- "signature 2.2.0",
- "spki 0.7.3",
- "subtle",
- "zeroize",
-]
-
 [[package]]
 name = "rtnetlink"
 version = "0.13.1"
@@ -7546,7 +7515,7 @@ version = "0.1.0"
 dependencies = [
  "base64 0.22.1",
  "derive_more 1.0.0",
- "rsa",
+ "openssl",
  "rusqlite",
  "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)",
 ]

diff --git a/Cargo.toml b/Cargo.toml
index 7a88f734..4897409a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -58,9 +58,9 @@ tokio = { version = "1.39.2", features = [
 ] }
 tracing = "0.1.40"
 tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] }
-rsa = { version = "0.9.7", features = ["pem"] }
 base64 = "0.22.1"
 rusqlite = "0.28.0"
+openssl = "0.10.68"

 [profile.maxperf]
 inherits = "release"
diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml
index 85661b8e..2d07f7ed 100644
--- a/anchor/common/ssv_types/Cargo.toml
+++ b/anchor/common/ssv_types/Cargo.toml
@@ -6,7 +6,7 @@ authors = ["Sigma Prime "]

 [dependencies]
 types = { workspace = true}
-rsa = { workspace = true }
+openssl = { workspace = true }
 derive_more = { workspace = true }
 base64 = { workspace = true }
 rusqlite = { workspace = true }
diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs
index 9023069a..ee937c32 100644
--- a/anchor/common/ssv_types/src/operator.rs
+++
b/anchor/common/ssv_types/src/operator.rs @@ -1,7 +1,7 @@ use crate::util::parse_rsa; use derive_more::{Deref, From}; -use rsa::pkcs8::DecodePublicKey; -use rsa::RsaPublicKey; +use openssl::pkey::Public; +use openssl::rsa::Rsa; use std::cmp::Eq; use std::fmt::Debug; use std::hash::Hash; @@ -14,7 +14,7 @@ use types::Address; impl From<(u64, String, String)> for Operator { fn from(source: (u64, String, String)) -> Self { let id: OperatorId = OperatorId(source.0); - let rsa_pubkey = RsaPublicKey::from_public_key_pem(&source.1) + let rsa_pubkey = Rsa::public_key_from_pem(source.1.as_bytes()) .expect("Failed to parse String into RsaPublicKey"); let owner: Address = Address::from_str(&source.2).expect("Failed to parse String into Address"); @@ -36,7 +36,7 @@ pub struct Operator { /// ID to uniquely identify this operator pub id: OperatorId, /// Base-64 encoded PEM RSA public key - pub rsa_pubkey: RsaPublicKey, + pub rsa_pubkey: Rsa, /// Owner of the operator pub owner: Address, } @@ -49,7 +49,7 @@ impl Operator { } // Creates a new operator from an existing RSA public key and OperatorId - pub fn new_with_pubkey(rsa_pubkey: RsaPublicKey, id: OperatorId, owner: Address) -> Self { + pub fn new_with_pubkey(rsa_pubkey: Rsa, id: OperatorId, owner: Address) -> Self { Self { id, rsa_pubkey, diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index b345edb1..335af5ed 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -5,6 +5,4 @@ use types::PublicKey; pub struct Share { /// The public key of this Share pub share_pubkey: PublicKey, - // Encrypted part - // todo!() } diff --git a/anchor/common/ssv_types/src/util.rs b/anchor/common/ssv_types/src/util.rs index 4de43814..ef972e15 100644 --- a/anchor/common/ssv_types/src/util.rs +++ b/anchor/common/ssv_types/src/util.rs @@ -1,9 +1,9 @@ use base64::prelude::*; -use rsa::pkcs8::DecodePublicKey; -use rsa::RsaPublicKey; +use openssl::pkey::Public; +use openssl::rsa::Rsa; // Parse from a RSA public key string into the associated RSA representation -pub fn parse_rsa(pem_data: &str) -> Result { +pub fn parse_rsa(pem_data: &str) -> Result, String> { // First decode the base64 data let pem_decoded = BASE64_STANDARD .decode(pem_data) @@ -22,7 +22,7 @@ pub fn parse_rsa(pem_data: &str) -> Result { .replace("-----END RSA PUBLIC KEY-----", "-----END PUBLIC KEY-----"); // Parse the PEM string into an RSA public key using PKCS8 format - let rsa_pubkey = RsaPublicKey::from_public_key_pem(&pem_string) + let rsa_pubkey = Rsa::public_key_from_pem(pem_string.as_bytes()) .map_err(|e| format!("Failed to parse RSA public key: {}", e))?; Ok(rsa_pubkey) diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml index 04476ed1..e23fee8b 100644 --- a/anchor/database/Cargo.toml +++ b/anchor/database/Cargo.toml @@ -10,8 +10,8 @@ r2d2_sqlite = "0.21.0" rusqlite = { workspace = true} ssv_types = { workspace = true } types = { workspace = true } -base64 = {workspace = true} -rsa = {workspace = true} +base64 = { workspace = true } +openssl = { workspace = true } [dev-dependencies] rand = "0.8.5" diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 288d6068..8da1aafa 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -1,6 +1,7 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; -use rsa::pkcs8::{EncodePublicKey, LineEnding}; -use rsa::RsaPublicKey; +use openssl::pkey::Public; +use 
+
 use rusqlite::params;
 use ssv_types::{Operator, OperatorId};
 
@@ -58,15 +59,16 @@ impl NetworkDatabase {
         self.operators.contains_key(id)
     }
 
-    // Helper to encode the RsaPublicKey to PEM string
-    fn encode_pubkey(pubkey: &RsaPublicKey) -> String {
+    // Helper to encode the RsaPublicKey to PEM
+    fn encode_pubkey(pubkey: &Rsa<Public>) -> Vec<u8> {
         // this should never fail as the key has already been validated upon construction
         pubkey
-            .to_public_key_pem(LineEnding::default())
+            .public_key_to_pem()
             .expect("Failed to encode RsaPublicKey")
     }
 }
 
+/*
 #[cfg(test)]
 mod operator_database_tests {
     use super::*;
@@ -142,3 +144,4 @@ mod operator_database_tests {
         }
     }
 }
+*/
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index 50e73bc8..bb3bad0d 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -1,6 +1,6 @@
 CREATE TABLE operators (
     operator_id INTEGER PRIMARY KEY,
-    public_key TEXT NOT NULL,
+    public_key BLOB NOT NULL,
    owner_address TEXT NOT NULL,
    UNIQUE (public_key)
 );
diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/test_utils/mod.rs
index f589dfef..4ff7a37d 100644
--- a/anchor/database/src/test_utils/mod.rs
+++ b/anchor/database/src/test_utils/mod.rs
@@ -1,7 +1,6 @@
 use crate::NetworkDatabase;
+use openssl::rsa::Rsa;
 use rand::Rng;
-use rsa::RsaPrivateKey;
-use rsa::RsaPublicKey;
 use rusqlite::{params, OptionalExtension};
 use ssv_types::{
     Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex,
@@ -20,9 +19,11 @@ pub fn random_pubkey() -> PublicKey {
 pub fn dummy_operator(id: u64) -> Operator {
     let op_id = OperatorId(id);
     let address = Address::random();
-    let _priv_key = RsaPrivateKey::new(&mut rand::thread_rng(), 2048).unwrap();
-    let pubkey = RsaPublicKey::from(&_priv_key);
-    Operator::new_with_pubkey(pubkey, op_id, address)
+    //let pubkey = Rsa::generate(2048).unwrap().public_key_to_pem();
+    let priv_key = Rsa::generate(2048).unwrap();
+    let public_key = priv_key.public_key_to_pem().unwrap();
+    let public_key = Rsa::public_key_from_pem(&public_key).unwrap();
+    Operator::new_with_pubkey(public_key, op_id, address)
 }
 
 // Generate a random Cluster
From 54a19daf361363d3548c8cbf6bfa12c5b5a6191e Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Mon, 9 Dec 2024 14:23:59 +0000
Subject: [PATCH 12/50] migrate and fix tests

---
 Cargo.lock                                 | 149 ++++++++++-----------
 Cargo.toml                                 |   1 +
 anchor/common/ssv_types/src/operator.rs    |   4 +-
 anchor/database/src/cluster_operations.rs  |   2 +-
 anchor/database/src/operator_operations.rs |  24 ++--
 anchor/database/src/table_schema.sql       |   2 +-
 6 files changed, 96 insertions(+), 86 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 636571a9..e02a1625 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -169,9 +169,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-primitives"
-version = "0.8.14"
+version = "0.8.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9db948902dfbae96a73c2fbf1f7abec62af034ab883e4c777c3fd29702bd6e2c"
+checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -201,9 +201,9 @@ dependencies = [
 
 [[package]]
 name = "alloy-rlp"
-version = "0.3.9"
+version = "0.3.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da0822426598f95e45dd1ea32a738dac057529a709ee645fcc516ffa4cbde08f"
+checksum = "f542548a609dca89fcd72b3b9f355928cf844d4363c5eed9c5273a3dd225e097"
 dependencies = [
  "alloy-rlp-derive",
  "arrayvec",
@@
-212,9 +212,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b09cae092c27b6f1bde952653a22708691802e57bfef4a2973b80bea21efd3f" +checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", @@ -305,9 +305,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arbitrary" @@ -538,7 +538,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.41", + "rustix 0.38.42", "slab", "tracing", "windows-sys 0.59.0", @@ -617,7 +617,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "hyper 1.5.1", @@ -650,7 +650,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "http-body-util", "mime", @@ -1027,9 +1027,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.2" +version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" dependencies = [ "jobserver", "libc", @@ -1074,9 +1074,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1106,9 +1106,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -1116,9 +1116,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -1141,9 +1141,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clap_utils" @@ -2697,9 +2697,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "fastrlp" @@ -3468,9 +3468,9 @@ dependencies = [ [[package]] name = "http" 
-version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3495,7 +3495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.1.0", + "http 1.2.0", ] [[package]] @@ -3506,7 +3506,7 @@ checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "pin-project-lite", ] @@ -3583,7 +3583,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "httparse", "httpdate", @@ -3628,7 +3628,7 @@ checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-util", - "http 1.1.0", + "http 1.2.0", "http-body 1.0.1", "hyper 1.5.1", "pin-project-lite", @@ -4058,9 +4058,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", @@ -4209,9 +4209,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.167" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libflate" @@ -5210,9 +5210,9 @@ dependencies = [ [[package]] name = "multihash" -version = "0.19.2" +version = "0.19.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc41f430805af9d1cf4adae4ed2149c759b877b01d909a1f40256188d09345d2" +checksum = "6b430e7953c29dd6a09afc29ff0bb69c6e306329ee6794700aee27b76a1aea8d" dependencies = [ "core2", "unsigned-varint 0.8.0", @@ -5806,12 +5806,12 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.14" +version = "2.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879952a81a83930934cbf1786752d6dedc3b1f29e8f8fb2ad1d0a36f377cf442" +checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 1.0.69", + "thiserror 2.0.6", "ucd-trie", ] @@ -5889,7 +5889,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.41", + "rustix 0.38.42", "tracing", "windows-sys 0.59.0", ] @@ -6205,7 +6205,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.19", "socket2 0.5.8", - "thiserror 2.0.3", + "thiserror 2.0.6", "tokio", "tracing", ] @@ -6224,7 +6224,7 @@ dependencies = [ "rustls 0.23.19", "rustls-pki-types", "slab", - "thiserror 2.0.3", + "thiserror 2.0.6", "tinyvec", "tracing", "web-time", @@ -6232,9 +6232,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a626c6807713b15cac82a6acaccd6043c9a5408c24baae07611fec3f243da" +checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" 
dependencies = [ "cfg_aliases", "libc", @@ -6728,15 +6728,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags 2.6.0", "errno", "libc", "linux-raw-sys 0.4.14", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -7816,7 +7816,7 @@ dependencies = [ "cfg-if", "fastrand", "once_cell", - "rustix 0.38.41", + "rustix 0.38.42", "windows-sys 0.59.0", ] @@ -7837,7 +7837,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5352447f921fda68cf61b4101566c0bdb5104eff6804d0678e5227580ab6a4e9" dependencies = [ - "rustix 0.38.41", + "rustix 0.38.42", "windows-sys 0.59.0", ] @@ -7870,11 +7870,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.6", ] [[package]] @@ -7890,9 +7890,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", @@ -8073,9 +8073,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" dependencies = [ "futures-core", "pin-project-lite", @@ -8085,9 +8085,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -8150,7 +8150,7 @@ checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "bitflags 2.6.0", "bytes", - "http 1.1.0", + "http 1.2.0", "pin-project-lite", "tower-layer", "tower-service", @@ -8737,9 +8737,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -8748,13 +8748,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", 
"quote", "syn 2.0.90", @@ -8763,9 +8762,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.47" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -8776,9 +8775,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -8786,9 +8785,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -8799,9 +8798,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "wasm-streams" @@ -8818,9 +8817,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -9226,9 +9225,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af310deaae937e48a26602b730250b4949e125f468f11e6990be3e5304ddd96f" +checksum = "ea8b391c9a790b496184c29f7f93b9ed5b16abb306c05415b68bcc16e4d06432" [[package]] name = "xmltree" diff --git a/Cargo.toml b/Cargo.toml index 4897409a..ef01b1da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ base64 = "0.22.1" rusqlite = "0.28.0" openssl = "0.10.68" + [profile.maxperf] inherits = "release" lto = "fat" diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs index ee937c32..4d88dac5 100644 --- a/anchor/common/ssv_types/src/operator.rs +++ b/anchor/common/ssv_types/src/operator.rs @@ -1,4 +1,5 @@ use crate::util::parse_rsa; +use base64::prelude::*; use derive_more::{Deref, From}; use openssl::pkey::Public; use openssl::rsa::Rsa; @@ -14,7 +15,8 @@ use types::Address; impl From<(u64, String, String)> for Operator { fn from(source: (u64, String, String)) -> Self { let id: OperatorId = OperatorId(source.0); - let rsa_pubkey = Rsa::public_key_from_pem(source.1.as_bytes()) + let decoded_pem = BASE64_STANDARD.decode(source.1).unwrap(); + let rsa_pubkey = Rsa::public_key_from_pem(&decoded_pem) .expect("Failed to parse String into RsaPublicKey"); let owner: Address = Address::from_str(&source.2).expect("Failed to parse String into Address"); diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 59e6ea5b..114e4a82 100644 --- 
a/anchor/database/src/cluster_operations.rs
+++ b/anchor/database/src/cluster_operations.rs
@@ -12,7 +12,7 @@ impl NetworkDatabase {
         // Insert the top level cluster data and associated validator metadata
         tx.prepare_cached(SQL[&SqlStatement::InsertCluster])?
-            .execute(params![*cluster.cluster_id, 0])?;
+            .execute(params![*cluster.cluster_id])?;
         tx.prepare_cached(SQL[&SqlStatement::InsertValidator])?
             .execute(params![
                 cluster.validator_metadata.validator_pubkey.to_string(),
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index 8da1aafa..94e563ce 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -1,4 +1,5 @@
 use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL};
+use base64::prelude::*;
 use openssl::pkey::Public;
 use openssl::rsa::Rsa;
 
@@ -60,15 +61,16 @@ impl NetworkDatabase {
     }
 
     // Helper to encode the RsaPublicKey to PEM
-    fn encode_pubkey(pubkey: &Rsa<Public>) -> Vec<u8> {
+    fn encode_pubkey(pubkey: &Rsa<Public>) -> String {
         // this should never fail as the key has already been validated upon construction
-        pubkey
-            .public_key_to_pem()
-            .expect("Failed to encode RsaPublicKey")
+        BASE64_STANDARD.encode(
+            pubkey
+                .public_key_to_pem()
+                .expect("Failed to encode RsaPublicKey"),
+        )
     }
 }
 
-/*
 #[cfg(test)]
 mod operator_database_tests {
     use super::*;
@@ -92,7 +94,11 @@ mod operator_database_tests {
         let fetched_operator = db.get_operator(&operator.id);
         if let Some(op) = fetched_operator {
             assert_eq!(op.id, operator.id);
-            assert_eq!(op.rsa_pubkey, operator.rsa_pubkey);
+
+            assert_eq!(
+                op.rsa_pubkey.public_key_to_pem().unwrap(),
+                operator.rsa_pubkey.public_key_to_pem().unwrap()
+            );
             assert_eq!(op.owner, operator.owner);
         } else {
             panic!("Expected to find operator in memory");
@@ -101,7 +107,10 @@ mod operator_database_tests {
         // Check to make sure the operator is also in the underlying db
         let db_operator = get_operator_from_db(&db, operator.id);
         if let Some(op) = db_operator {
-            assert_eq!(op.rsa_pubkey, operator.rsa_pubkey);
+            assert_eq!(
+                op.rsa_pubkey.public_key_to_pem().unwrap(),
+                operator.rsa_pubkey.public_key_to_pem().unwrap()
+            );
             assert_eq!(op.id, operator.id);
             assert_eq!(op.owner, operator.owner);
         } else {
@@ -144,4 +153,3 @@ mod operator_database_tests {
         }
     }
 }
-*/
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index bb3bad0d..50e73bc8 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -1,6 +1,6 @@
 CREATE TABLE operators (
     operator_id INTEGER PRIMARY KEY,
-    public_key BLOB NOT NULL,
+    public_key TEXT NOT NULL,
    owner_address TEXT NOT NULL,
    UNIQUE (public_key)
 );
From 18af78006462998a52d13a2bf5d3a5a1362dcbb6 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Wed, 11 Dec 2024 14:30:27 +0000
Subject: [PATCH 13/50] state store rebuild mvp

---
 Cargo.lock                                   |  98 +++++------
 Cargo.toml                                   |   1 +
 anchor/common/ssv_types/src/lib.rs           |   1 +
 anchor/common/ssv_types/src/operator.rs      |  21 ---
 .../common/ssv_types/src/sql_conversions.rs  | 123 +++++++++++++
 anchor/database/src/cluster_operations.rs    | 102 ++++++-----
 anchor/database/src/lib.rs                   | 129 ++++++++------
 anchor/database/src/operator_operations.rs   |  30 ++--
 anchor/database/src/state.rs                 | 163 ++++++++++++++++++
 anchor/database/src/table_schema.sql         |   7 +-
 anchor/database/src/test_utils/mod.rs        |  18 +-
 anchor/database/src/validator_operations.rs  |   2 +-
 12 files changed, 476 insertions(+), 219 deletions(-)
 create mode 100644 anchor/common/ssv_types/src/sql_conversions.rs
create mode 100644 anchor/database/src/state.rs diff --git a/Cargo.lock b/Cargo.lock index e02a1625..cb60c79b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,7 +896,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "arbitrary", @@ -1165,7 +1165,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "clap", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1246,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "quote", "syn 1.0.109", @@ -1852,7 +1852,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2316,7 +2316,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2338,7 +2338,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2406,7 +2406,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "bytes", "discv5 0.9.0", @@ -2810,7 +2810,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 
(git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -3156,7 +3156,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "async-channel", "asynchronous-codec", @@ -3351,9 +3351,9 @@ checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" [[package]] name = "hickory-proto" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07698b8420e2f0d6447a436ba999ec85d8fbf2a398bbd737b82cac4a2e96e512" +checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" dependencies = [ "async-trait", "cfg-if", @@ -3362,7 +3362,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.4.0", + "idna", "ipnet", "once_cell", "rand", @@ -3376,9 +3376,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28757f23aa75c98f254cf0405e6d8c25b831b32921b050a66692427679b1f243" +checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" dependencies = [ "cfg-if", "futures-util", @@ -3783,16 +3783,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.0.3" @@ -3963,7 +3953,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "bytes", ] @@ -4159,7 +4149,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "arbitrary", "c-kzg", @@ -4799,7 +4789,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4857,7 +4847,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "git-version", "target_info", @@ -4936,7 +4926,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4984,7 +4974,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "fnv", ] @@ -5060,7 +5050,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5102,7 +5092,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "prometheus", ] @@ -5303,9 +5293,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" +checksum = "16c903aa70590cb93691bf97a767c8d1d6122d2cc9070433deb3bbf36ce8bd23" dependencies = [ "bytes", "futures", @@ -5944,7 +5934,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6870,7 +6860,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" [[package]] name = "salsa20" @@ -7044,7 +7034,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "serde", "url", @@ -7052,9 +7042,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -7071,9 +7061,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", @@ -7666,7 +7656,7 @@ dependencies = [ 
[[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7795,7 +7785,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "async-channel", "futures", @@ -7853,7 +7843,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "quote", "syn 1.0.109", @@ -8128,14 +8118,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tokio", "tower-layer", "tower-service", @@ -8368,7 +8358,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8462,12 +8452,6 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" version = "1.0.14" @@ -8552,7 +8536,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#fec502db9f93923f5fa965aad970ac244930c321" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -8565,7 +8549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", - "idna 1.0.3", + "idna", "percent-encoding", ] diff --git a/Cargo.toml b/Cargo.toml index ef01b1da..249aedaf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ client = { path = "anchor/client" } qbft = { path = "anchor/qbft" } http_api = { path = "anchor/http_api" } http_metrics = { path = "anchor/http_metrics" } +database = { path = "anchor/database" } network = { path ="anchor/network"} version = { path ="anchor/common/version"} ssv_types = { path = "anchor/common/ssv_types" } diff --git a/anchor/common/ssv_types/src/lib.rs b/anchor/common/ssv_types/src/lib.rs index 6d25f44d..4cf950d1 100644 --- 
a/anchor/common/ssv_types/src/lib.rs
+++ b/anchor/common/ssv_types/src/lib.rs
@@ -4,4 +4,5 @@ pub use share::Share;
 mod cluster;
 mod operator;
 mod share;
+mod sql_conversions;
 mod util;
diff --git a/anchor/common/ssv_types/src/operator.rs b/anchor/common/ssv_types/src/operator.rs
index 4d88dac5..ce7f1cf5 100644
--- a/anchor/common/ssv_types/src/operator.rs
+++ b/anchor/common/ssv_types/src/operator.rs
@@ -1,33 +1,12 @@
 use crate::util::parse_rsa;
-use base64::prelude::*;
 use derive_more::{Deref, From};
 use openssl::pkey::Public;
 use openssl::rsa::Rsa;
 use std::cmp::Eq;
 use std::fmt::Debug;
 use std::hash::Hash;
-use std::str::FromStr;
 use types::Address;
 
-/// From (id, pubkey, owner) for easy db conversion
-/// This should never fail as if it does it indicates some data corruption in the DB as the data is
-/// saved from a previously constructed operator
-impl From<(u64, String, String)> for Operator {
-    fn from(source: (u64, String, String)) -> Self {
-        let id: OperatorId = OperatorId(source.0);
-        let decoded_pem = BASE64_STANDARD.decode(source.1).unwrap();
-        let rsa_pubkey = Rsa::public_key_from_pem(&decoded_pem)
-            .expect("Failed to parse String into RsaPublicKey");
-        let owner: Address =
-            Address::from_str(&source.2).expect("Failed to parse String into Address");
-        Operator {
-            id,
-            rsa_pubkey,
-            owner,
-        }
-    }
-}
-
 /// Unique identifier for an Operator.
 #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)]
 pub struct OperatorId(pub u64);
diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs
new file mode 100644
index 00000000..8730f7de
--- /dev/null
+++ b/anchor/common/ssv_types/src/sql_conversions.rs
@@ -0,0 +1,123 @@
+use crate::{
+    Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex,
+    ValidatorMetadata,
+};
+use base64::prelude::*;
+use openssl::rsa::Rsa;
+use rusqlite::{types::Type, Error as SqlError, Row};
+use std::io::{Error, ErrorKind};
+use std::str::FromStr;
+use types::{Address, Graffiti, PublicKey, GRAFFITI_BYTES_LEN};
+
+// Helper for converting to a rusqlite Error
+fn from_sql_error<E: std::error::Error + Send + Sync + 'static>(
+    col: usize,
+    t: Type,
+    e: E,
+) -> SqlError {
+    SqlError::FromSqlConversionFailure(col, t, Box::new(e))
+}
+
+// Conversion from SQL row to an Operator
+impl TryFrom<&Row<'_>> for Operator {
+    // Change the error type to rusqlite::Error
+    type Error = SqlError;
+
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        let id: OperatorId = OperatorId(row.get(0)?);
+
+        // For each operation that could fail, we convert its error to a rusqlite::Error
+        let pem_string = row.get::<_, String>(1)?;
+        let decoded_pem = BASE64_STANDARD
+            .decode(pem_string)
+            .map_err(|e| from_sql_error(1, Type::Text, e))?;
+
+        let rsa_pubkey =
+            Rsa::public_key_from_pem(&decoded_pem).map_err(|e| from_sql_error(1, Type::Text, e))?;
+
+        let owner_str = row.get::<_, String>(2)?;
+        let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(2, Type::Text, e))?;
+
+        Ok(Operator {
+            id,
+            rsa_pubkey,
+            owner,
+        })
+    }
+}
+
+// Conversion from SQL row into a Share
+impl TryFrom<&Row<'_>> for Share {
+    type Error = rusqlite::Error;
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        // We get the share_pubkey string from column 2
+        let share_pubkey_str = row.get::<_, String>(2)?;
+
+        // Convert the string to PublicKey, wrapping any parsing errors
+        let share_pubkey = PublicKey::from_str(&share_pubkey_str)
+            .map_err(|e| from_sql_error(2, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
+
+        Ok(Share { share_pubkey })
+    }
+}
+
+// Conversion from SQL row and cluster members into a Cluster
+impl TryFrom<(&Row<'_>, Vec<ClusterMember>)> for Cluster {
+    type Error = rusqlite::Error;
+    fn try_from((row, cluster_members): (&Row, Vec<ClusterMember>)) -> Result<Self, Self::Error> {
+        // These are simple numeric/boolean conversions that use rusqlite's built-in error handling
+        let cluster_id: ClusterId = ClusterId(row.get(0)?);
+        let faulty: u64 = row.get(1)?;
+        let liquidated: bool = row.get(2)?;
+
+        // Convert the row to ValidatorMetadata - this will use the ValidatorMetadata impl
+        // defined below
+        let validator_metadata: ValidatorMetadata = row.try_into()?;
+
+        Ok(Cluster {
+            cluster_id,
+            cluster_members,
+            faulty,
+            liquidated,
+            validator_metadata,
+        })
+    }
+}
+
+// Conversion from SQL row to ValidatorMetadata
+impl TryFrom<&Row<'_>> for ValidatorMetadata {
+    type Error = SqlError;
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        // Get and parse validator_pubkey from column 3
+        let validator_pubkey_str = row.get::<_, String>(3)?;
+        let validator_pubkey = PublicKey::from_str(&validator_pubkey_str)
+            .map_err(|e| from_sql_error(3, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
+
+        // Get the owner from column 7
+        let owner_str = row.get::<_, String>(7)?;
+        let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(7, Type::Text, e))?;
+
+        // The rest of the fields may not be populated upon first insert, so the values may be
+        // default
+
+        // Get and parse fee_recipient from column 4
+        let fee_recipient_str = row.get::<_, String>(4)?;
+        let fee_recipient =
+            Address::from_str(&fee_recipient_str).map_err(|e| from_sql_error(4, Type::Text, e))?;
+
+        // Get the Graffiti from column 5
+        let graffiti = Graffiti(row.get::<_, [u8; GRAFFITI_BYTES_LEN]>(5)?);
+
+        // Get validator_index from column 6
+        let validator_index: ValidatorIndex = ValidatorIndex(row.get(6)?);
+
+
+        Ok(ValidatorMetadata {
+            validator_index,
+            validator_pubkey,
+            fee_recipient,
+            graffiti,
+            owner,
+        })
+    }
+}
diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs
index 114e4a82..49bd7094 100644
--- a/anchor/database/src/cluster_operations.rs
+++ b/anchor/database/src/cluster_operations.rs
@@ -1,7 +1,6 @@
 use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL};
 use rusqlite::params;
 use ssv_types::{Cluster, ClusterId};
-use std::collections::{HashMap, HashSet};
 
 /// Implements all cluster related functionality on the database
 impl NetworkDatabase {
@@ -16,11 +15,18 @@ impl NetworkDatabase {
         tx.prepare_cached(SQL[&SqlStatement::InsertValidator])?
             .execute(params![
                 cluster.validator_metadata.validator_pubkey.to_string(),
-                *cluster.cluster_id
+                *cluster.cluster_id,
+                cluster.validator_metadata.owner.to_string()
             ])?;
 
         // Insert all of the members and their shares
+        let mut member_in_cluster = false;
         cluster.cluster_members.iter().try_for_each(|member| {
+            if let Some(id) = self.state.id {
+                if id == member.operator_id {
+                    member_in_cluster = true;
+                }
+            }
             tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])?
.execute(params![*member.cluster_id, *member.operator_id])?; self.insert_share( @@ -35,30 +41,34 @@ impl NetworkDatabase { // Commit all operations to the db tx.commit()?; - // Since we have successfully committed, we can now store everything in memory - self.clusters.insert(cluster.cluster_id); - self.validator_metadata - .insert(cluster.cluster_id, cluster.validator_metadata); - - let mut shares = HashMap::with_capacity(cluster.cluster_members.len()); - let mut members = HashSet::with_capacity(cluster.cluster_members.len()); - - // Process all members in a single iteration - for member in cluster.cluster_members { - shares.insert(member.operator_id, member.share); - members.insert(member.operator_id); + // If we are a member in this cluster, store relevant information + if member_in_cluster { + let cluster_id = cluster.cluster_id; + // Store the cluster_id since we are a part of this cluster + self.state.clusters.insert(cluster_id); + cluster.cluster_members.iter().for_each(|member| { + // Store all of the operators that are a member of this cluster + self.state + .cluster_members + .entry(cluster_id) + .or_default() + .insert(member.operator_id); + // Store our share of the key + if member.operator_id == self.state.id.expect("Guaranteed to be populated") { + self.state.shares.insert(cluster_id, member.share.clone()); + } + }); + // Store the metadata of the validator for the cluster + self.state + .validator_metadata + .insert(cluster_id, cluster.validator_metadata); } - - // Bulk insert the processed data - self.shares.insert(cluster.cluster_id, shares); - self.cluster_members.insert(cluster.cluster_id, members); - Ok(()) } /// Mark the cluster as liquidated or active pub fn update_status(&mut self, id: ClusterId, status: bool) -> Result<(), DatabaseError> { - if !self.clusters.contains(&id) { + if !self.state.clusters.contains(&id) { return Err(DatabaseError::NotFound(format!( "Cluster with id {} not in database", *id @@ -68,23 +78,6 @@ impl NetworkDatabase { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])? .execute(params![status, *id])?; - // todo!() change in memory status - Ok(()) - } - - /// Update the number of fauly nodes in the cluster - pub fn update_faulty(&mut self, id: ClusterId, num_faulty: u64) -> Result<(), DatabaseError> { - if !self.clusters.contains(&id) { - return Err(DatabaseError::NotFound(format!( - "Cluster with id {} not in database", - *id - ))); - } - - let conn = self.connection()?; - conn.prepare_cached(SQL[&SqlStatement::UpdateClusterFaulty])? - .execute(params![num_faulty, *id])?; - // todo!() change in memory status Ok(()) } @@ -93,7 +86,7 @@ impl NetworkDatabase { /// This corresponds to a validator being removed or exiting pub fn delete_cluster(&mut self, id: ClusterId) -> Result<(), DatabaseError> { // Make sure this cluster exists - if !self.clusters.contains(&id) { + if !self.state.clusters.contains(&id) { return Err(DatabaseError::NotFound(format!( "Cluster with id {} not in database", *id @@ -104,25 +97,25 @@ impl NetworkDatabase { conn.prepare_cached(SQL[&SqlStatement::DeleteCluster])? 
            .execute(params![*id])?;
 
-        // remove all in memory stores: todo!() need to figure out exactly how to structure in
-        // memory
-        let _ = self.clusters.remove(&id);
+        // If we are a member of this cluster, remove all relevant information
+        if self.state.clusters.contains(&id) {
+            self.state.clusters.remove(&id);
+            self.state.shares.remove(&id);
+            self.state.validator_metadata.remove(&id);
+            self.state.cluster_members.remove(&id);
+        }
 
         Ok(())
     }
-
-    /// Check if this cluster exists
-    pub fn cluster_exists(&self, id: &ClusterId) -> bool {
-        self.clusters.contains(id)
-    }
 }
 
 #[cfg(test)]
 mod cluster_database_tests {
     use super::*;
     use crate::test_utils::{
-        db_with_cluster, dummy_cluster, dummy_operator, get_cluster_from_db,
+        db_with_cluster, debug_print_db, dummy_cluster, dummy_operator, get_cluster_from_db,
         get_cluster_member_from_db, get_shares_from_db, get_validator_from_db,
     };
+    use ssv_types::OperatorId;
     use tempfile::tempdir;
 
     #[test]
@@ -131,7 +124,7 @@ mod cluster_database_tests {
         // Create a temporary database
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap();
 
         // First insert the operators that will be part of the cluster
         for i in 0..4 {
@@ -143,10 +136,13 @@ mod cluster_database_tests {
         let cluster = dummy_cluster(4);
         assert!(db.insert_cluster(cluster.clone()).is_ok());
 
+        debug_print_db(&db);
+        println!("{:#?}", db.state);
+
         // Verify cluster is in memory
-        assert!(db.cluster_exists(&cluster.cluster_id));
+        assert!(db.member_of_cluster(&cluster.cluster_id));
         assert_eq!(
-            db.cluster_members[&cluster.cluster_id].len(),
+            db.state.cluster_members[&cluster.cluster_id].len(),
             cluster.cluster_members.len()
         );
 
@@ -182,7 +178,7 @@ mod cluster_database_tests {
         // Create a temporary database
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, None).unwrap();
 
         // Try to insert a cluster without first inserting its operators
         let cluster = dummy_cluster(4);
@@ -196,7 +192,7 @@ mod cluster_database_tests {
         // Create a temporary database
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap();
 
         // populate the db with operators and cluster
         let cluster = db_with_cluster(&mut db);
@@ -205,7 +201,7 @@ mod cluster_database_tests {
         assert!(db.delete_cluster(cluster.cluster_id).is_ok());
 
         let cluster_row = get_cluster_from_db(&db, cluster.cluster_id);
-        assert!(!db.cluster_exists(&cluster.cluster_id));
+        assert!(!db.member_of_cluster(&cluster.cluster_id));
         assert!(cluster_row.is_none());
 
         // Make sure all the members are gone
diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index 63ca9fe4..b96ddca4 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -12,62 +12,79 @@
 mod cluster_operations;
 pub mod error;
 mod operator_operations;
 mod share_operations;
+mod state;
 mod validator_operations;
 
 #[cfg(test)]
 pub mod test_utils;
 
 type Pool = r2d2::Pool<SqliteConnectionManager>;
-pub const POOL_SIZE: u32 = 1;
-pub const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);
-
-/// Top level NetworkDatabase that contains in memory storage for quick access
-/// to relevant information and a connection to the database
-#[derive(Debug, Clone)]
-pub struct NetworkDatabase {
+type PoolConn = r2d2::PooledConnection<SqliteConnectionManager>;
+const POOL_SIZE: u32 = 1;
+const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);
+
+#[derive(Debug, Clone, Default)]
+pub struct NetworkState {
+    /// The ID of our own operator. This is determined via events when the operator is
+    /// registered with the network. Therefore, this may not be available right away if the client
+    /// is running but has not been registered with the network contract yet.
+    id: Option<OperatorId>,
     /// All of the operators in the network
     operators: HashMap<OperatorId, Operator>,
-    /// All of the clusters in the networ
+    /// All of the Clusters that we are a member of
     clusters: HashSet<ClusterId>,
-    /// Mapping of a cluster ID to its relevant Validator metadata
+    /// All of the shares that we are responsible for/own
+    shares: HashMap<ClusterId, Share>,
+    /// ValidatorMetadata for clusters we are a member in
     validator_metadata: HashMap<ClusterId, ValidatorMetadata>,
-    /// Double layer share map from Cluster => Operator => Share
-    shares: HashMap<ClusterId, HashMap<OperatorId, Share>>,
-    /// Maps a ClusterID to the operators in its cluster
+    /// Full set of members for a cluster we are in
     cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
+}
+
+/// Top level NetworkDatabase that contains in memory storage for quick access
+/// to relevant information and a connection to the database
+#[derive(Debug, Clone)]
+pub struct NetworkDatabase {
+    /// Custom state stores for easy data access
+    state: NetworkState,
     /// Connection to the database
     conn_pool: Pool,
 }
 
 impl NetworkDatabase {
-    /// Open an existing database at the given `path`, or create one if none exists.
-    pub fn open_or_create(path: &Path) -> Result<Self, DatabaseError> {
+    /// Construct a new NetworkDatabase at the given path and with the OperatorId if registered
+    pub fn new(path: &Path, id: Option<OperatorId>) -> Result<Self, DatabaseError> {
+        let conn_pool = Self::open_or_create(path)?;
+        let state = if let Some(id) = id {
+            NetworkState::new_with_state(&conn_pool, id)?
+        } else {
+            NetworkState::default()
+        };
+        Ok(Self { state, conn_pool })
+    }
+
+    // Open an existing database at the given `path`, or create one if none exists.
+    fn open_or_create(path: &Path) -> Result<Pool, DatabaseError> {
         if path.exists() {
-            Self::open(path)
+            Self::open_conn_pool(path)
         } else {
             Self::create(path)
         }
     }
 
-    // Open an existing `NetworkDatabase` from disk.
-    fn open(path: &Path) -> Result<Self, DatabaseError> {
-        let conn_pool = Self::open_conn_pool(path)?;
-
-        // todo!(): populate in memory stores
-
-        let db = Self {
-            operators: HashMap::new(),
-            clusters: HashSet::new(),
-            validator_metadata: HashMap::new(),
-            shares: HashMap::new(),
-            cluster_members: HashMap::new(),
-            conn_pool,
-        };
-        Ok(db)
+    // Build a new connection pool
+    fn open_conn_pool(path: &Path) -> Result<Pool, DatabaseError> {
+        let manager = SqliteConnectionManager::file(path);
+        // some other args here
+        let conn_pool = Pool::builder()
+            .max_size(POOL_SIZE)
+            .connection_timeout(CONNECTION_TIMEOUT)
+            .build(manager)?;
+        Ok(conn_pool)
     }
 
-    /// Create a `NetworkDatabase` at the given path.
-    pub fn create(path: &Path) -> Result<Self, DatabaseError> {
+    // Create a database at the given path.
+    fn create(path: &Path) -> Result<Pool, DatabaseError> {
         let _file = File::options()
             .write(true)
             .read(true)
@@ -80,32 +97,11 @@ impl NetworkDatabase {
 
         // create all of the tables
         conn.execute_batch(include_str!("table_schema.sql"))?;
-
-        // todo!() populate in memory stores
-
-        Ok(Self {
-            operators: HashMap::new(),
-            clusters: HashSet::new(),
-            validator_metadata: HashMap::new(),
-            shares: HashMap::new(),
-            cluster_members: HashMap::new(),
-            conn_pool,
-        })
-    }
-
-    /// Build a new connection pool
-    fn open_conn_pool(path: &Path) -> Result<Pool, DatabaseError> {
-        let manager = SqliteConnectionManager::file(path);
-        // some other args here
-        let conn_pool = Pool::builder()
-            .max_size(POOL_SIZE)
-            .connection_timeout(CONNECTION_TIMEOUT)
-            .build(manager)?;
         Ok(conn_pool)
     }
 
     // Open a new connection
-    fn connection(&self) -> Result<r2d2::PooledConnection<SqliteConnectionManager>, DatabaseError> {
+    fn connection(&self) -> Result<PoolConn, DatabaseError> {
         Ok(self.conn_pool.get()?)
     }
 }
@@ -115,12 +111,15 @@
 pub(crate) enum SqlStatement {
     InsertOperator,
     DeleteOperator,
+    GetAllOperators,
 
     InsertCluster,
     InsertClusterMember,
     UpdateClusterStatus,
     UpdateClusterFaulty,
     DeleteCluster,
+    GetAllClusters,
+    GetClusterMembers,
 
     InsertShare,
 
     InsertValidator,
@@ -139,6 +138,8 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock:
         SqlStatement::DeleteOperator,
         "DELETE FROM operators WHERE operator_id = ?1",
     );
+
+    m.insert(SqlStatement::GetAllOperators, "SELECT * FROM operators");
     m.insert(
         SqlStatement::InsertCluster,
         "INSERT INTO clusters (cluster_id) VALUES (?1)",
@@ -159,11 +160,27 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock:
         SqlStatement::DeleteCluster,
         "DELETE FROM clusters WHERE cluster_id = ?1",
     );
+    m.insert(
+        SqlStatement::GetAllClusters,
+        "SELECT c.cluster_id, c.faulty, c.liquidated,
+                v.validator_pubkey, v.fee_recipient, v.graffiti, v.validator_index, v.owner
+         FROM clusters c
+         JOIN cluster_members cm ON c.cluster_id = cm.cluster_id
+         JOIN validators v ON c.cluster_id = v.cluster_id
+         WHERE cm.operator_id = ?",
+    );
+    m.insert(
+        SqlStatement::GetClusterMembers,
+        "SELECT cm.cluster_id, cm.operator_id, s.share_pubkey
+         FROM cluster_members cm
+         JOIN shares s ON cm.cluster_id = s.cluster_id AND cm.operator_id = s.operator_id
+         WHERE cm.cluster_id = ?",
+    );
     m.insert(SqlStatement::InsertShare,
         "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey) VALUES (?1, ?2, ?3, ?4)");
     m.insert(
         SqlStatement::InsertValidator,
-        "INSERT INTO validators (validator_pubkey, cluster_id) VALUES (?1, ?2)",
+        "INSERT INTO validators (validator_pubkey, cluster_id, owner) VALUES (?1, ?2, ?3)",
     );
     m.insert(
         SqlStatement::UpdateFeeRecipient,
@@ -189,7 +206,7 @@ mod database_test {
     fn test_create_database() {
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let db = NetworkDatabase::open_or_create(&file);
+        let db = NetworkDatabase::new(&file, None);
         assert!(db.is_ok());
     }
 }
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index 94e563ce..f837bad9 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -11,13 +11,14 @@ impl NetworkDatabase {
     /// Insert a new operator into the database
     pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), DatabaseError> {
         // make sure that this operator does not already exist
-        if self.operators.contains_key(&operator.id) {
+        if self.state.operators.contains_key(&operator.id) {
             return Err(DatabaseError::NotFound(format!(
                 "Operator with id {} already in database",
                 *operator.id
             )));
         }
 
+        // Insert into the database, then store in memory
        let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::InsertOperator])?
             .execute(params![
                 *operator.id,
                 Self::encode_pubkey(&operator.rsa_pubkey),
                 operator.owner.to_string()
             ])?;
-
-        // then, store in memory
-        self.operators.insert(operator.id, operator.clone());
+        self.state.operators.insert(operator.id, operator.clone());
         Ok(())
     }
 
     /// Delete an operator
     pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), DatabaseError> {
         // make sure that it exists
-        if !self.operators.contains_key(&id) {
+        if !self.state.operators.contains_key(&id) {
             return Err(DatabaseError::NotFound(format!(
                 "Operator with id {} not in database",
                 *id
@@ -46,18 +45,15 @@ impl NetworkDatabase {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::DeleteOperator])?
             .execute(params![*id])?;
-        self.operators.remove(&id);
-        Ok(())
-    }
 
-    /// Get operator data from in memory store
-    pub fn get_operator(&self, id: &OperatorId) -> Option<&Operator> {
-        self.operators.get(id)
+        // Remove the operator
+        self.state.operators.remove(&id);
+        Ok(())
     }
 
-    /// Check to see if the operator exists
-    pub fn operator_exists(&self, id: &OperatorId) -> bool {
-        self.operators.contains_key(id)
+    /// Set the id of our own operator
+    pub fn set_own_id(&mut self, id: OperatorId) {
+        self.state.id = Some(id);
     }
 
     // Helper to encode the RsaPublicKey to PEM
@@ -84,7 +80,7 @@ mod operator_database_tests {
         // Create a temporary database
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, None).unwrap();
 
         // Insert dummy operator data into the database
         let operator = dummy_operator(1);
@@ -124,7 +120,7 @@ mod operator_database_tests {
         // Create a temporary database
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, None).unwrap();
 
         // Insert dummy operator data into the database
         let operator = dummy_operator(1);
@@ -145,7 +141,7 @@ mod operator_database_tests {
     fn test_insert_multiple_operators() {
         let dir = tempdir().unwrap();
         let file = dir.path().join("db.sqlite");
-        let mut db = NetworkDatabase::create(&file).unwrap();
+        let mut db = NetworkDatabase::new(&file, None).unwrap();
 
         for id in 0..4 {
             let operator = dummy_operator(id);
diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
new file mode 100644
index 00000000..1058de8a
--- /dev/null
+++ b/anchor/database/src/state.rs
@@ -0,0 +1,163 @@
+use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn, SqlStatement, SQL};
+use ssv_types::{
+    Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorMetadata,
+};
+use std::collections::{HashMap, HashSet};
+
+impl NetworkState {
+    // Main constructor that builds the network state from the database data
+    pub(crate) fn new_with_state(conn_pool: &Pool, id: OperatorId) -> Result<Self, DatabaseError> {
+        // Get database connection from the pool
+        let conn = conn_pool.get()?;
+
+        // First phase: Fetch data from the database
+        // Get all of the operators from the network
+        let operators = Self::fetch_operators(&conn)?;
+        // Get clusters that this operator (id) participates in
+        let clusters = Self::fetch_clusters(&conn, id)?;
+
+        // Second phase: Transform data into efficient state stores
+        // Pre-allocate HashMaps with known capacity
+        let num_clusters = clusters.len();
+        let mut shares: HashMap<ClusterId, Share> =
HashMap::with_capacity(num_clusters); + let mut validator_metadata: HashMap<ClusterId, ValidatorMetadata> = + HashMap::with_capacity(num_clusters); + let mut cluster_members: HashMap<ClusterId, HashSet<OperatorId>> = + HashMap::with_capacity(num_clusters); + + println!("{:#?}", clusters); + // Populate state stores from cluster data + clusters.iter().for_each(|cluster| { + let cluster_id = cluster.cluster_id; + + // Store validator metadata for each cluster + validator_metadata.insert(cluster_id, cluster.validator_metadata.to_owned()); + + // Process each member in the cluster + for member in cluster.cluster_members.clone().into_iter() { + // Track cluster membership + cluster_members + .entry(cluster_id) + .or_default() + .insert(member.operator_id); + + // If this member is us, store our share + if member.operator_id == id { + shares.insert(cluster_id, member.share); + } + } + }); + + // Return fully constructed state + Ok(Self { + id: Some(id), + operators, + clusters: clusters.iter().map(|c| c.cluster_id).collect(), + shares, + validator_metadata, + cluster_members, + }) + } + + // Fetch and transform operator data from database + fn fetch_operators(conn: &PoolConn) -> Result<HashMap<OperatorId, Operator>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllOperators])?; + let operators = stmt + .query_map([], |row| { + // Transform each row into an operator and collect into a HashMap + let operator: Operator = row.try_into()?; + Ok((operator.id, operator)) + })? + .map(|result| result.map_err(DatabaseError::from)); + operators.collect() + } + + // Fetch and transform cluster data for a specific operator + fn fetch_clusters( + conn: &PoolConn, + operator_id: OperatorId, + ) -> Result<Vec<Cluster>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllClusters])?; + let cluster = stmt + .query_map([operator_id.0], |row| { + let cluster_id = ClusterId(row.get(0)?); + + // Get all of the cluster members, and then construct the cluster + let cluster_members = Self::fetch_cluster_members(conn, cluster_id)?; + Cluster::try_from((row, cluster_members)) + })?
+ .map(|result| result.map_err(DatabaseError::from)); + cluster.collect() + } + + // Fetch members of a specific cluster + fn fetch_cluster_members( + conn: &PoolConn, + cluster_id: ClusterId, + ) -> Result<Vec<ClusterMember>, rusqlite::Error> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetClusterMembers])?; + let cluster_members = stmt.query_map([cluster_id.0], |row| { + // Fetch all of the cluster members for the given ClusterId + let share = row.try_into()?; + Ok(ClusterMember { + operator_id: OperatorId(row.get(1)?), + cluster_id, + share, + }) + })?; + cluster_members.collect() + } +} + +// Clean interface for accessing network state +impl NetworkDatabase { + /// Get operator data from in-memory store + pub fn get_operator(&self, id: &OperatorId) -> Option<&Operator> { + self.state.operators.get(id) + } + + /// Check if an operator exists + pub fn operator_exists(&self, id: &OperatorId) -> bool { + self.state.operators.contains_key(id) + } + + /// Check if we are a member of a specific cluster + pub fn member_of_cluster(&self, id: &ClusterId) -> bool { + self.state.clusters.contains(id) + } +} + +#[cfg(test)] +mod database_state_tests { + use super::*; + use crate::test_utils::{ + db_with_cluster, debug_print_db, dummy_cluster, dummy_operator, get_cluster_from_db, + get_cluster_member_from_db, get_shares_from_db, get_validator_from_db, + }; + use ssv_types::OperatorId; + use tempfile::tempdir; + + #[test] + fn test_state_after_restart() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + + // Insert the operators and a cluster we are a part of + for i in 0..4 { + let operator = dummy_operator(i); + assert!(db.insert_operator(&operator).is_ok()); + } + // Insert a dummy cluster + let cluster = dummy_cluster(4); + assert!(db.insert_cluster(cluster.clone()).is_ok()); + println!("{:#?}", db.state); + + // drop db and recreate it, stores should be built since db already exists + drop(db); + + let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + println!("{:#?}", db.state); + } +} diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 50e73bc8..76b2651f 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -22,9 +22,10 @@ CREATE TABLE cluster_members ( CREATE TABLE validators ( validator_pubkey TEXT PRIMARY KEY, cluster_id INTEGER NOT NULL, - fee_recipient TEXT, - graffiti BLOB, - validator_index INTEGER, + fee_recipient TEXT DEFAULT '0x0000000000000000000000000000000000000000', + owner TEXT, + graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', + validator_index INTEGER DEFAULT 0, last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE ); diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/test_utils/mod.rs index 4ff7a37d..c57daa9d 100644 --- a/anchor/database/src/test_utils/mod.rs +++ b/anchor/database/src/test_utils/mod.rs @@ -94,16 +94,10 @@ pub fn get_operator_from_db(db: &NetworkDatabase, id: OperatorId) -> Option = query - .query_row(params![*id], |row| { - Ok(( - row.get(0).unwrap(), - row.get(1).unwrap(), - row.get(2).unwrap(), - )) - }) + let res: Option<Operator> = query + .query_row(params![*id], |row| Ok(row.try_into().unwrap())) .ok(); - res.map(|operator| operator.into()) + res } // Get a cluster from the database @@ -230,11 +224,13 @@
pub fn debug_print_db(db: &NetworkDatabase) { let validators = stmt .query_map([], |row| { Ok(format!( - "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Index: {:?}", + "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Owner: {:?}, Graffiti: {:?}, Index: {:?}", row.get::<_, String>(0).unwrap(), row.get::<_, i64>(1).unwrap(), row.get::<_, Option<String>>(2).unwrap(), - row.get::<_, Option<i64>>(3).unwrap() + row.get::<_, Option<String>>(3).unwrap(), + row.get::<_, Vec<u8>>(4).unwrap(), + row.get::<_, Option<i64>>(5).unwrap() )) }) .unwrap(); diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index dba45599..3add82e9 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -34,7 +34,7 @@ impl NetworkDatabase { /// Get the metadata for the cluster pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<&ValidatorMetadata> { - self.validator_metadata.get(id) + self.state.validator_metadata.get(id) } } From 1652528622735a267035e87101bfebb0bba21980 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 11 Dec 2024 16:04:01 +0000 Subject: [PATCH 14/50] restructure test --- Cargo.lock | 6 +- anchor/common/ssv_types/src/share.rs | 2 + .../common/ssv_types/src/sql_conversions.rs | 12 +- anchor/database/src/cluster_operations.rs | 112 ------------------ anchor/database/src/lib.rs | 20 +--- anchor/database/src/operator_operations.rs | 83 ------------- anchor/database/src/share_operations.rs | 1 + anchor/database/src/state.rs | 35 ------ anchor/database/src/table_schema.sql | 2 +- anchor/database/src/tests/cluster_tests.rs | 107 +++++++++++++++++ anchor/database/src/tests/mod.rs | 24 ++++ anchor/database/src/tests/operator_tests.rs | 82 +++++++++++++ anchor/database/src/tests/state_tests.rs | 30 +++++ .../src/{test_utils/mod.rs => tests/utils.rs} | 1 + anchor/database/src/validator_operations.rs | 3 - 15 files changed, 262 insertions(+), 258 deletions(-) create mode 100644 anchor/database/src/tests/cluster_tests.rs create mode 100644 anchor/database/src/tests/mod.rs create mode 100644 anchor/database/src/tests/operator_tests.rs create mode 100644 anchor/database/src/tests/state_tests.rs rename anchor/database/src/{test_utils/mod.rs => tests/utils.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index cb60c79b..19874d6c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5726,7 +5726,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.7", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -6359,9 +6359,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags 2.6.0", ] diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index 335af5ed..80672602 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -5,4 +5,6 @@ use types::PublicKey; pub struct Share { /// The public key of this Share pub share_pubkey: PublicKey, + /// The encrypted private key of the share + pub encrypted_private_key: [u8; 256], } diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs index 8730f7de..7ac881eb 100644 ---
a/anchor/common/ssv_types/src/sql_conversions.rs +++ b/anchor/common/ssv_types/src/sql_conversions.rs @@ -57,7 +57,13 @@ impl TryFrom<&Row<'_>> for Share { let share_pubkey = PublicKey::from_str(&share_pubkey_str) .map_err(|e| from_sql_error(2, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; - Ok(Share { share_pubkey }) + // Get the encrypted private key from column 3 + let encrypted_private_key: [u8; 256] = row.get(3)?; + + Ok(Share { + share_pubkey, + encrypted_private_key, + }) } } @@ -97,8 +103,7 @@ impl TryFrom<&Row<'_>> for ValidatorMetadata { let owner_str = row.get::<_, String>(4)?; let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(7, Type::Text, e))?; - // The rest of the field may not be populated upon first insert. So the values may be - // default + // The rest of the fields may not be populated upon first insert, so they may be defaulted // Get and parse fee_recipient from column 4 let fee_recipient_str = row.get::<_, String>(4)?; @@ -111,7 +116,6 @@ impl TryFrom<&Row<'_>> for ValidatorMetadata { // Get validator_index from column 6 let validator_index: ValidatorIndex = ValidatorIndex(row.get(6)?); - Ok(ValidatorMetadata { validator_index, validator_pubkey, diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 49bd7094..0b11cde0 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -107,115 +107,3 @@ impl NetworkDatabase { Ok(()) } } - -#[cfg(test)] -mod cluster_database_tests { - use super::*; - use crate::test_utils::{ - db_with_cluster, debug_print_db, dummy_cluster, dummy_operator, get_cluster_from_db, - get_cluster_member_from_db, get_shares_from_db, get_validator_from_db, - }; - use ssv_types::OperatorId; - use tempfile::tempdir; - - #[test] - // Test inserting a cluster into the database - fn test_insert_retrieve_cluster() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - - // First insert the operators that will be part of the cluster - for i in 0..4 { - let operator = dummy_operator(i); - assert!(db.insert_operator(&operator).is_ok()); - } - - // Insert a dummy cluster - let cluster = dummy_cluster(4); - assert!(db.insert_cluster(cluster.clone()).is_ok()); - - debug_print_db(&db); - println!("{:#?}", db.state); - - // Verify cluster is in memory - assert!(db.member_of_cluster(&cluster.cluster_id)); - assert_eq!( - db.state.cluster_members[&cluster.cluster_id].len(), - cluster.cluster_members.len() - ); - - // Verify cluster is in the underlying database - let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); - assert!(cluster_row.is_some()); - let (db_cluster_id, db_faulty, db_liquidated) = cluster_row.unwrap(); - assert_eq!(db_cluster_id, *cluster.cluster_id as i64); - assert_eq!(db_faulty, cluster.faulty as i64); - assert_eq!(db_liquidated, cluster.liquidated); - - // Verify cluster members are in the underlying database - for member in &cluster.cluster_members { - let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); - assert!(member_row.is_some()); - let (db_cluster_id, db_operator_id) = member_row.unwrap(); - assert_eq!(db_cluster_id, *member.cluster_id as i64); - assert_eq!(db_operator_id, *member.operator_id as i64); - } - - // Verify that the shares are in the database - let all_shares = get_shares_from_db(&db, cluster.cluster_id); -
assert!(!all_shares.is_empty()); - - // Verify that the validator is in the database - let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string(); - assert!(get_validator_from_db(&db, &validator_pubkey_str).is_some()); - } - - #[test] - /// Try inserting a cluster that does not already have registers operators in the database - fn test_insert_cluster_without_operators() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); - - // Try to insert a cluster without first inserting its operators - let cluster = dummy_cluster(4); - - // This should fail because the operators don't exist in the database - assert!(db.insert_cluster(cluster).is_err()); - } - - #[test] - fn test_delete_cluster() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - - // populate the db with operators and cluster - let cluster = db_with_cluster(&mut db); - - // Delete the cluster and then confirm it is gone from memory and dbb - assert!(db.delete_cluster(cluster.cluster_id).is_ok()); - - let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); - assert!(!db.member_of_cluster(&cluster.cluster_id)); - assert!(cluster_row.is_none()); - - // Make sure all the members are gone - for member in &cluster.cluster_members { - let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); - assert!(member_row.is_none()); - } - - // Make sure all the shares are gone - let all_shares = get_shares_from_db(&db, cluster.cluster_id); - assert!(all_shares.is_empty()); - - // Make sure the validator this cluster represented is gone - let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string(); - assert!(get_validator_from_db(&db, &validator_pubkey_str).is_none()); - } -} diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index b96ddca4..cda91fe8 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -16,7 +16,7 @@ mod state; mod validator_operations; #[cfg(test)] -pub mod test_utils; +mod tests; type Pool = r2d2::Pool<SqliteConnectionManager>; type PoolConn = r2d2::PooledConnection<SqliteConnectionManager>; @@ -171,13 +171,13 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: ); m.insert( SqlStatement::GetClusterMembers, - "SELECT cm.cluster_id, cm.operator_id, s.share_pubkey + "SELECT cm.cluster_id, cm.operator_id, s.share_pubkey, s.encrypted_key FROM cluster_members cm JOIN shares s ON cm.cluster_id = s.cluster_id AND cm.operator_id = s.operator_id WHERE cm.cluster_id = ?", ); m.insert(SqlStatement::InsertShare, - "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey) VALUES (?1, ?2, ?3, ?4)"); + "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) VALUES (?1, ?2, ?3, ?4, ?5)"); m.insert( SqlStatement::InsertValidator, "INSERT INTO validators (validator_pubkey, cluster_id, owner) VALUES (?1, ?2, ?3)", @@ -196,17 +196,3 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: ); m }); - -#[cfg(test)] -mod database_test { - use super::*; - use tempfile::tempdir; - - #[test] - fn test_create_database() { - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let db = NetworkDatabase::new(&file, None); - assert!(db.is_ok()); - } -} diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index f837bad9..0d805e51
100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -66,86 +66,3 @@ impl NetworkDatabase { ) } } - -#[cfg(test)] -mod operator_database_tests { - use super::*; - use crate::test_utils::{dummy_operator, get_operator_from_db}; - use tempfile::tempdir; - - #[test] - // Test inserting into the database and then confirming that it is both in - // memory and in the underlying database - fn test_insert_retrieve_operator() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); - - // Insert dummy operator data into the database - let operator = dummy_operator(1); - assert!(db.insert_operator(&operator).is_ok()); - - // Fetch operator from in memory store and confirm values - let fetched_operator = db.get_operator(&operator.id); - if let Some(op) = fetched_operator { - assert_eq!(op.id, operator.id); - - assert_eq!( - op.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap() - ); - assert_eq!(op.owner, operator.owner); - } else { - panic!("Expected to find operator in memory"); - } - - // Check to make sure the operator is also in the underlying db - let db_operator = get_operator_from_db(&db, operator.id); - if let Some(op) = db_operator { - assert_eq!( - op.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap() - ); - assert_eq!(op.id, operator.id); - assert_eq!(op.owner, operator.owner); - } else { - panic!("Expected to find operator in database"); - } - } - - #[test] - // Test deleting an operator and confirming it is gone from the db and in memory - fn test_insert_delete_operator() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); - - // Insert dummy operator data into the database - let operator = dummy_operator(1); - let _ = db.insert_operator(&operator); - - // Now, delete the operator - assert!(db.delete_operator(operator.id).is_ok()); - - // Confirm that is it removed from in memory - assert!(db.get_operator(&operator.id).is_none()); - - // Also confirm that it is removed from the database - assert!(get_operator_from_db(&db, operator.id).is_none()); - } - - #[test] - // insert multiple operators - fn test_insert_multiple_operators() { - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); - - for id in 0..4 { - let operator = dummy_operator(id); - assert!(db.insert_operator(&operator).is_ok()); - } - } -} diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs index 11e13f76..60a7346a 100644 --- a/anchor/database/src/share_operations.rs +++ b/anchor/database/src/share_operations.rs @@ -19,6 +19,7 @@ impl NetworkDatabase { *cluster_id, *operator_id, share.share_pubkey.to_string(), + share.encrypted_private_key ])?; Ok(()) } diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 1058de8a..98ecc129 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -126,38 +126,3 @@ impl NetworkDatabase { self.state.clusters.contains(id) } } - -#[cfg(test)] -mod database_state_tests { - use super::*; - use crate::test_utils::{ - db_with_cluster, debug_print_db, dummy_cluster, dummy_operator, get_cluster_from_db, - get_cluster_member_from_db, get_shares_from_db, get_validator_from_db, 
- }; - use ssv_types::OperatorId; - use tempfile::tempdir; - - #[test] - fn test_state_after_restart() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - - // Insert the operators and a cluster we are a part of - for i in 0..4 { - let operator = dummy_operator(i); - assert!(db.insert_operator(&operator).is_ok()); - } - // Insert a dummy cluster - let cluster = dummy_cluster(4); - assert!(db.insert_cluster(cluster.clone()).is_ok()); - println!("{:#?}", db.state); - - // drop db and recreate it, stores should be built since db already exists - drop(db); - - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - println!("{:#?}", db.state); - } -} diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 76b2651f..fe6e9317 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -26,7 +26,6 @@ CREATE TABLE validators ( owner TEXT, graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', validator_index INTEGER DEFAULT 0, - last_updated TIMESTAMP DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE ); @@ -35,6 +34,7 @@ CREATE TABLE shares ( cluster_id INTEGER NOT NULL, operator_id INTEGER NOT NULL, share_pubkey TEXT, + encrypted_key BLOB, PRIMARY KEY (validator_pubkey, operator_id), FOREIGN KEY (cluster_id, operator_id) REFERENCES cluster_members(cluster_id, operator_id) ON DELETE CASCADE, FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs new file mode 100644 index 00000000..4a25e9d9 --- /dev/null +++ b/anchor/database/src/tests/cluster_tests.rs @@ -0,0 +1,107 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod cluster_database_tests { + use super::*; + + #[test] + // Test inserting a cluster into the database + fn test_insert_retrieve_cluster() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + + // First insert the operators that will be part of the cluster + for i in 0..4 { + let operator = dummy_operator(i); + assert!(db.insert_operator(&operator).is_ok()); + } + + // Insert a dummy cluster + let cluster = dummy_cluster(4); + assert!(db.insert_cluster(cluster.clone()).is_ok()); + + debug_print_db(&db); + println!("{:#?}", db.state); + + // Verify cluster is in memory + assert!(db.member_of_cluster(&cluster.cluster_id)); + assert_eq!( + db.state.cluster_members[&cluster.cluster_id].len(), + cluster.cluster_members.len() + ); + + // Verify cluster is in the underlying database + let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); + assert!(cluster_row.is_some()); + let (db_cluster_id, db_faulty, db_liquidated) = cluster_row.unwrap(); + assert_eq!(db_cluster_id, *cluster.cluster_id as i64); + assert_eq!(db_faulty, cluster.faulty as i64); + assert_eq!(db_liquidated, cluster.liquidated); + + // Verify cluster members are in the underlying database + for member in &cluster.cluster_members { + let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); + assert!(member_row.is_some()); + let (db_cluster_id, db_operator_id) = member_row.unwrap(); + assert_eq!(db_cluster_id, 
*member.cluster_id as i64); + assert_eq!(db_operator_id, *member.operator_id as i64); + } + + // Verify that the shares are in the database + let all_shares = get_shares_from_db(&db, cluster.cluster_id); + assert!(!all_shares.is_empty()); + + // Verify that the validator is in the database + let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string(); + assert!(get_validator_from_db(&db, &validator_pubkey_str).is_some()); + } + + #[test] + /// Try inserting a cluster that does not already have registered operators in the database + fn test_insert_cluster_without_operators() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, None).unwrap(); + + // Try to insert a cluster without first inserting its operators + let cluster = dummy_cluster(4); + + // This should fail because the operators don't exist in the database + assert!(db.insert_cluster(cluster).is_err()); + } + + #[test] + fn test_delete_cluster() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + + // populate the db with operators and cluster + let cluster = db_with_cluster(&mut db); + + // Delete the cluster and then confirm it is gone from memory and db + assert!(db.delete_cluster(cluster.cluster_id).is_ok()); + + let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); + assert!(!db.member_of_cluster(&cluster.cluster_id)); + assert!(cluster_row.is_none()); + + // Make sure all the members are gone + for member in &cluster.cluster_members { + let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); + assert!(member_row.is_none()); + } + + // Make sure all the shares are gone + let all_shares = get_shares_from_db(&db, cluster.cluster_id); + assert!(all_shares.is_empty()); + + // Make sure the validator this cluster represented is gone + let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string(); + assert!(get_validator_from_db(&db, &validator_pubkey_str).is_none()); + } +} diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs new file mode 100644 index 00000000..eeb59795 --- /dev/null +++ b/anchor/database/src/tests/mod.rs @@ -0,0 +1,24 @@ +mod cluster_tests; +mod operator_tests; +mod state_tests; +mod utils; + +pub mod test_prelude { + pub use super::utils::*; + pub use crate::NetworkDatabase; + pub use ssv_types::*; + pub use tempfile::tempdir; +} + +#[cfg(test)] +mod database_test { + use super::test_prelude::*; + + #[test] + fn test_create_database() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let db = NetworkDatabase::new(&file, None); + assert!(db.is_ok()); + } +} diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs new file mode 100644 index 00000000..0db7a62b --- /dev/null +++ b/anchor/database/src/tests/operator_tests.rs @@ -0,0 +1,82 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod operator_database_tests { + use super::*; + + #[test] + // Test inserting into the database and then confirming that it is both in + // memory and in the underlying database + fn test_insert_retrieve_operator() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, None).unwrap(); + + // Insert dummy operator data
into the database + let operator = dummy_operator(1); + assert!(db.insert_operator(&operator).is_ok()); + + // Fetch operator from in memory store and confirm values + let fetched_operator = db.get_operator(&operator.id); + if let Some(op) = fetched_operator { + assert_eq!(op.id, operator.id); + + assert_eq!( + op.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap() + ); + assert_eq!(op.owner, operator.owner); + } else { + panic!("Expected to find operator in memory"); + } + + // Check to make sure the operator is also in the underlying db + let db_operator = get_operator_from_db(&db, operator.id); + if let Some(op) = db_operator { + assert_eq!( + op.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap() + ); + assert_eq!(op.id, operator.id); + assert_eq!(op.owner, operator.owner); + } else { + panic!("Expected to find operator in database"); + } + } + + #[test] + // Test deleting an operator and confirming it is gone from the db and in memory + fn test_insert_delete_operator() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, None).unwrap(); + + // Insert dummy operator data into the database + let operator = dummy_operator(1); + let _ = db.insert_operator(&operator); + + // Now, delete the operator + assert!(db.delete_operator(operator.id).is_ok()); + + // Confirm that is it removed from in memory + assert!(db.get_operator(&operator.id).is_none()); + + // Also confirm that it is removed from the database + assert!(get_operator_from_db(&db, operator.id).is_none()); + } + + #[test] + // insert multiple operators + fn test_insert_multiple_operators() { + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, None).unwrap(); + + for id in 0..4 { + let operator = dummy_operator(id); + assert!(db.insert_operator(&operator).is_ok()); + } + } +} diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs new file mode 100644 index 00000000..d75ff349 --- /dev/null +++ b/anchor/database/src/tests/state_tests.rs @@ -0,0 +1,30 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod state_database_tests { + use super::*; + + #[test] + fn test_state_after_restart() { + // Create a temporary database + let dir = tempdir().unwrap(); + let file = dir.path().join("db.sqlite"); + let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + + // Insert the operators and a cluster we are a part of + for i in 0..4 { + let operator = dummy_operator(i); + assert!(db.insert_operator(&operator).is_ok()); + } + // Insert a dummy cluster + let cluster = dummy_cluster(4); + assert!(db.insert_cluster(cluster.clone()).is_ok()); + println!("{:#?}", db.state); + + // drop db and recreate it, stores should be built since db already exists + drop(db); + + let db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + println!("{:#?}", db.state); + } +} diff --git a/anchor/database/src/test_utils/mod.rs b/anchor/database/src/tests/utils.rs similarity index 99% rename from anchor/database/src/test_utils/mod.rs rename to anchor/database/src/tests/utils.rs index c57daa9d..f4de0555 100644 --- a/anchor/database/src/test_utils/mod.rs +++ b/anchor/database/src/tests/utils.rs @@ -59,6 +59,7 @@ pub fn dummy_cluster_member(cluster_id: ClusterId, operator_id: OperatorId) -> C pub fn dummy_share() -> Share { Share { share_pubkey: random_pubkey(), 
+ encrypted_private_key: [0u8; 256], } } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 3add82e9..40154112 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -37,6 +37,3 @@ impl NetworkDatabase { self.state.validator_metadata.get(id) } } - -#[cfg(test)] -mod validator_database_tests {} From e99b7efd8360784eca93a05654c1b7cbff28ff5f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 11 Dec 2024 20:35:02 +0000 Subject: [PATCH 15/50] refactor entire test utilities, setup generalized testing framework --- anchor/database/src/lib.rs | 4 +- anchor/database/src/operator_operations.rs | 5 - anchor/database/src/state.rs | 6 +- anchor/database/src/tests/cluster_tests.rs | 104 +-- anchor/database/src/tests/operator_tests.rs | 114 ++-- anchor/database/src/tests/state_tests.rs | 42 +- anchor/database/src/tests/utils.rs | 693 ++++++++++++++------ anchor/network/Cargo.toml | 2 +- 8 files changed, 587 insertions(+), 383 deletions(-) diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index cda91fe8..482c47b6 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -9,7 +9,7 @@ use std::sync::LazyLock; use std::time::Duration; mod cluster_operations; -pub mod error; +mod error; mod operator_operations; mod share_operations; mod state; @@ -24,7 +24,7 @@ const POOL_SIZE: u32 = 1; const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); #[derive(Debug, Clone, Default)] -pub struct NetworkState { +struct NetworkState { /// The ID of our own operator. This is determined via events when the operator is /// registered with the network. Therefore, this may not be available right away if the client /// is running but has not been registered yet.
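For context, a minimal sketch of the lifecycle this doc comment describes, using only names that appear in this series (the event handling that actually detects our registration is assumed here and is not part of this diff; db_path is any path to the sqlite file):

    // First start: our operator id is not known yet, so no id is passed.
    let mut db = NetworkDatabase::new(&db_path, None)?;
    // Once the registration event for our operator is observed, record the id.
    db.set_own_id(OperatorId(1));
    // On a later restart the id can be supplied up front, and the in-memory
    // stores are rebuilt from the existing database file.
    let db = NetworkDatabase::new(&db_path, Some(OperatorId(1)))?;
    assert!(db.operator_exists(&OperatorId(1)));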
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index 0d805e51..b56d62e9 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -51,11 +51,6 @@ impl NetworkDatabase { Ok(()) } - /// Set the id of our own operator - pub fn set_own_id(&mut self, id: OperatorId) { - self.state.id = Some(id); - } - // Helper to encode the RsaPublicKey to PEM fn encode_pubkey(pubkey: &Rsa<Public>) -> String { // this should never fail as the key has already been validated upon construction diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 98ecc129..da7c79f8 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -25,7 +25,6 @@ impl NetworkState { let mut cluster_members: HashMap<ClusterId, HashSet<OperatorId>> = HashMap::with_capacity(num_clusters); - println!("{:#?}", clusters); // Populate state stores from cluster data clusters.iter().for_each(|cluster| { let cluster_id = cluster.cluster_id; @@ -125,4 +124,9 @@ impl NetworkDatabase { self.state.clusters.contains(id) } + + /// Set the id of our own operator + pub fn set_own_id(&mut self, id: OperatorId) { + self.state.id = Some(id); + } } diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 4a25e9d9..81eb2d1b 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -7,101 +7,29 @@ mod cluster_database_tests { use super::*; #[test] // Test inserting a cluster into the database fn test_insert_retrieve_cluster() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - - // First insert the operators that will be part of the cluster - for i in 0..4 { - let operator = dummy_operator(i); - assert!(db.insert_operator(&operator).is_ok()); - } - - // Insert a dummy cluster - let cluster = dummy_cluster(4); - assert!(db.insert_cluster(cluster.clone()).is_ok()); - - debug_print_db(&db); - println!("{:#?}", db.state); - - // Verify cluster is in memory - assert!(db.member_of_cluster(&cluster.cluster_id)); - assert_eq!( - db.state.cluster_members[&cluster.cluster_id].len(), - cluster.cluster_members.len() - ); - - // Verify cluster is in the underlying database - let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); - assert!(cluster_row.is_some()); - let (db_cluster_id, db_faulty, db_liquidated) = cluster_row.unwrap(); - assert_eq!(db_cluster_id, *cluster.cluster_id as i64); - assert_eq!(db_faulty, cluster.faulty as i64); - assert_eq!(db_liquidated, cluster.liquidated); - - // Verify cluster members are in the underlying database - for member in &cluster.cluster_members { - let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); - assert!(member_row.is_some()); - let (db_cluster_id, db_operator_id) = member_row.unwrap(); - assert_eq!(db_cluster_id, + let fixture = TestFixture::new(Some(1)); +
assertions::assert_cluster_exists_fully(&fixture.db, &fixture.cluster); } #[test] - /// Try inserting a cluster that does not already have registered operators in the database + // Try inserting a cluster that does not already have registered operators in the database fn test_insert_cluster_without_operators() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); - - // Try to insert a cluster without first inserting its operators - let cluster = dummy_cluster(4); - - // This should fail because the operators don't exist in the database - assert!(db.insert_cluster(cluster).is_err()); + let mut fixture = TestFixture::new_empty(); + let cluster = generators::cluster::random(3); + fixture + .db + .insert_cluster(cluster) + .expect_err("Insertion should fail"); } #[test] + // Test deleting a cluster and make sure that it is properly cleaned up fn test_delete_cluster() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - - // populate the db with operators and cluster - let cluster = db_with_cluster(&mut db); - - // Delete the cluster and then confirm it is gone from memory and db - assert!(db.delete_cluster(cluster.cluster_id).is_ok()); - - let cluster_row = get_cluster_from_db(&db, cluster.cluster_id); - assert!(!db.member_of_cluster(&cluster.cluster_id)); - assert!(cluster_row.is_none()); - - // Make sure all the members are gone - for member in &cluster.cluster_members { - let member_row = get_cluster_member_from_db(&db, member.cluster_id, member.operator_id); - assert!(member_row.is_none()); - } - - // Make sure all the shares are gone - let all_shares = get_shares_from_db(&db, cluster.cluster_id); - assert!(all_shares.is_empty()); - - // Make sure the validator this cluster represented is gone - let validator_pubkey_str = cluster.validator_metadata.validator_pubkey.to_string(); - assert!(get_validator_from_db(&db, &validator_pubkey_str).is_none()); + let mut fixture = TestFixture::new(Some(1)); + fixture + .db + .delete_cluster(fixture.cluster.cluster_id) + .expect("Failed to delete cluster"); + assertions::assert_cluster_exists_not_fully(&fixture.db, &fixture.cluster); } } diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs index 0db7a62b..04767b19 100644 --- a/anchor/database/src/tests/operator_tests.rs +++ b/anchor/database/src/tests/operator_tests.rs @@ -5,78 +5,88 @@ mod operator_database_tests { use super::*; #[test] - // Test inserting into the database and then confirming that it is both in - // memory and in the underlying database + // Test to make sure we can insert new operators into the database and they are present in the + // state stores fn test_insert_retrieve_operator() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); + // Create a new test fixture with empty db + let mut fixture = TestFixture::new_empty(); - // Insert dummy operator data
confirm values - let fetched_operator = db.get_operator(&operator.id); - if let Some(op) = fetched_operator { - assert_eq!(op.id, operator.id); + // Confirm that it exists both in the db and the state store + assertions::assert_operator_exists_fully(&fixture.db, &operator); + } - assert_eq!( - op.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap() - ); - assert_eq!(op.owner, operator.owner); - } else { - panic!("Expected to find operator in memory"); - } + #[test] + // Ensure that we cannot insert a duplicate operator into the database + fn test_duplicate_insert() { + // Create a new test fixture with empty db + let mut fixture = TestFixture::new_empty(); + + // Generate a new operator and insert it + let operator = generators::operator::with_id(1); + fixture + .db + .insert_operator(&operator) + .expect("Failed to insert operator"); - // Check to make sure the operator is also in the underlying db - let db_operator = get_operator_from_db(&db, operator.id); - if let Some(op) = db_operator { - assert_eq!( - op.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap() - ); - assert_eq!(op.id, operator.id); - assert_eq!(op.owner, operator.owner); - } else { - panic!("Expected to find operator in database"); + // Try to insert it again, this should fail + let success = fixture.db.insert_operator(&operator); + if success.is_ok() { + panic!("Expected an error when inserting an operator that is already present"); } } #[test] // Test deleting an operator and confirming it is gone from the db and in memory fn test_insert_delete_operator() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); + // Create new test fixture with empty db + let mut fixture = TestFixture::new_empty(); - // Insert dummy operator data into the database - let operator = dummy_operator(1); - let _ = db.insert_operator(&operator); + // Generate a new operator and insert it + let operator = generators::operator::with_id(1); + fixture + .db + .insert_operator(&operator) + .expect("Failed to insert operator"); // Now, delete the operator - assert!(db.delete_operator(operator.id).is_ok()); + fixture + .db + .delete_operator(operator.id) + .expect("Failed to delete operator"); - // Confirm that is it removed from in memory - assert!(db.get_operator(&operator.id).is_none()); - - // Also confirm that it is removed from the database - assert!(get_operator_from_db(&db, operator.id).is_none()); + // Confirm that it is gone + assertions::assert_operator_not_exists_fully(&fixture.db, operator.id); } #[test] - // insert multiple operators + // Test inserting multiple operators fn test_insert_multiple_operators() { - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, None).unwrap(); + // Create new test fixture with empty db + let mut fixture = TestFixture::new_empty(); + + // Generate and insert operators + let operators: Vec = (0..4).map(generators::operator::with_id).collect(); + for operator in &operators { + fixture + .db + .insert_operator(operator) + .expect("Failed to insert operator"); + } - for id in 0..4 { - let operator = dummy_operator(id); - assert!(db.insert_operator(&operator).is_ok()); + // Delete them all and confirm deletion + for operator in operators { + fixture + .db + .delete_operator(operator.id) + .expect("Failed to delete operator"); + 
assertions::assert_operator_not_exists_fully(&fixture.db, operator.id); } } } diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index d75ff349..05fd9ff6 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -5,26 +5,34 @@ mod state_database_tests { use super::*; #[test] - fn test_state_after_restart() { - // Create a temporary database - let dir = tempdir().unwrap(); - let file = dir.path().join("db.sqlite"); - let mut db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); + // Make sure all of the previously inserted operators are present after restart + fn test_operator_store() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(Some(1)); - // Insert the operators and a cluster we are a part of - for i in 0..4 { - let operator = dummy_operator(i); - assert!(db.insert_operator(&operator).is_ok()); + // drop the database and then recreate it + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, Some(OperatorId(1))) + .expect("Failed to create database"); + + // confirm that all of the operators that were inserted still exist + for operator in fixture.operators { + assertions::assert_operator_exists_fully(&fixture.db, &operator); } - // Insert a dummy cluster - let cluster = dummy_cluster(4); - assert!(db.insert_cluster(cluster.clone()).is_ok()); - println!("{:#?}", db.state); + } + + #[test] + fn test_cluster_after_restart() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(Some(1)); + let cluster = fixture.cluster; - // drop db and recreate it, stores should be built since db already exists - drop(db); + // drop the database and then recreate it + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, Some(OperatorId(1))) + .expect("Failed to create database"); - let db = NetworkDatabase::new(&file, Some(OperatorId(1))).unwrap(); - println!("{:#?}", db.state); + // Confirm all cluster related data is still correct + assertions::assert_cluster_exists_fully(&fixture.db, &cluster); } } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index f4de0555..bcb90175 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -1,258 +1,517 @@ -use crate::NetworkDatabase; +use super::test_prelude::*; use openssl::rsa::Rsa; use rand::Rng; use rusqlite::{params, OptionalExtension}; -use ssv_types::{ - Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex, - ValidatorMetadata, -}; +use std::path::PathBuf; +use tempfile::TempDir; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use types::{Address, Graffiti, PublicKey}; +const DEFAULT_NUM_OPERATORS: u64 = 4; +const RSA_KEY_SIZE: u32 = 2048; +const DEFAULT_SEED: [u8; 16] = [42; 16]; -// Generate a random PublicKey -pub fn random_pubkey() -> PublicKey { - let rng = &mut XorShiftRng::from_seed([42; 16]); - PublicKey::random_for_test(rng) -} +// Test fixture for common scenarios +#[derive(Debug)] +pub struct TestFixture { + pub db:
NetworkDatabase, + pub cluster: Cluster, + pub operators: Vec<Operator>, + pub path: PathBuf, + _temp_dir: TempDir, +} + +impl TestFixture { + pub fn new(id: Option<u64>) -> Self { + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let db_path = temp_dir.path().join("test.db"); + let mut db = if let Some(id) = id { + NetworkDatabase::new(&db_path, Some(OperatorId(id))) + .expect("Failed to create test database") + } else { + NetworkDatabase::new(&db_path, None).expect("Failed to create test database") + }; + + let operators: Vec<Operator> = (0..DEFAULT_NUM_OPERATORS) + .map(generators::operator::with_id) + .collect(); + + operators.iter().for_each(|op| { + db.insert_operator(op).expect("Failed to insert operator"); + }); + + let cluster = generators::cluster::with_operators(&operators); + db.insert_cluster(cluster.clone()) + .expect("Failed to insert cluster"); - // Create members for the cluster - for i in 0..num_operators { - let member = dummy_cluster_member(cluster_id, OperatorId(i)); - members.push(member); + Self { + db, + cluster, + operators, + path: db_path, + _temp_dir: temp_dir, + } } - Cluster { - cluster_id, - cluster_members: members, - faulty: 0, - liquidated: false, - validator_metadata: dummy_validator_metadata(), + pub fn new_empty() -> Self { + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let db_path = temp_dir.path().join("test.db"); + + let db = NetworkDatabase::new(&db_path, None).expect("Failed to create test database"); + + Self { + db, + cluster: generators::cluster::random(0), // Empty cluster + operators: Vec::new(), + path: db_path, + _temp_dir: temp_dir, + } } } -// Generate a random ClusterMember -pub fn dummy_cluster_member(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember { - ClusterMember { - operator_id, - cluster_id, - share: dummy_share(), +// Generator functions for test data +pub mod generators { + use super::*; + + pub mod operator { + use super::*; + + pub fn random() -> Operator { + with_id(rand::thread_rng().gen()) + } + + pub fn with_id(id: u64) -> Operator { + let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key"); + let public_key = priv_key + .public_key_to_pem() + .and_then(|pem| Rsa::public_key_from_pem(&pem)) + .expect("Failed to process RSA key"); + + Operator::new_with_pubkey(public_key, OperatorId(id), Address::random()) + } } -// Generate a random Share -pub fn dummy_share() -> Share { - Share { - share_pubkey: random_pubkey(), - encrypted_private_key: [0u8; 256], + pub mod cluster { + use super::*; + + pub fn random(num_operators: u64) -> Cluster { + let cluster_id = ClusterId(rand::thread_rng().gen::().into()); + let members = (0..num_operators) + .map(|i| member::new(cluster_id, OperatorId(i))) + .collect(); + + Cluster { + cluster_id, + cluster_members: members, + faulty: 0, + liquidated: false, + validator_metadata: validator::random_metadata(), + } + } + + pub fn with_operators(operators: &[Operator]) -> Cluster { + let cluster_id = ClusterId(rand::thread_rng().gen::().into()); + let members = operators + .iter() + .map(|op| member::new(cluster_id, op.id)) + .collect(); + + Cluster { + cluster_id, + cluster_members: members, + faulty: 0, + liquidated: false, + validator_metadata: validator::random_metadata(), + } + } } -// Generate random validator metadata pub fn
dummy_validator_metadata() -> ValidatorMetadata { - ValidatorMetadata { - validator_index: ValidatorIndex(rand::thread_rng().gen::()), - validator_pubkey: random_pubkey(), - fee_recipient: Address::random(), - graffiti: Graffiti::default(), - owner: Address::random(), + pub mod member { + use super::*; + + pub fn new(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember { + ClusterMember { + operator_id, + cluster_id, + share: share::random(), + } + } } -// Construct a mock database with a cluster -pub fn db_with_cluster(db: &mut NetworkDatabase) -> Cluster { - for i in 0..4 { - let operator = dummy_operator(i); - db.insert_operator(&operator).unwrap(); + pub mod share { + use super::*; + + pub fn random() -> Share { + Share { + share_pubkey: pubkey::random(), + encrypted_private_key: [0u8; 256], + } + } } - // Insert a dummy cluster - let cluster = dummy_cluster(4); - db.insert_cluster(cluster.clone()).unwrap(); - cluster -} + pub mod pubkey { + use super::*; -// Get an Operator from the database -pub fn get_operator_from_db(db: &NetworkDatabase, id: OperatorId) -> Option<Operator> { - let conn = db.connection().unwrap(); - let mut query = conn - .prepare( - "SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1", - ) - .unwrap(); - let res: Option<Operator> = query - .query_row(params![*id], |row| Ok(row.try_into().unwrap())) - .ok(); - res -} + pub fn random() -> PublicKey { + let rng = &mut XorShiftRng::from_seed(DEFAULT_SEED); + PublicKey::random_for_test(rng) + } + } -// Get a cluster from the database -pub fn get_cluster_from_db(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { - let conn = db.connection().unwrap(); - let mut stmt = conn - .prepare("SELECT cluster_id, faulty, liquidated FROM clusters WHERE cluster_id = ?1") - .unwrap(); - let cluster_row: Option<(i64, i64, bool)> = stmt - .query_row(params![*id], |row| { - Ok((row.get(0)?, row.get(1)?, row.get(2)?)) - }) - .optional() - .unwrap(); - cluster_row -} + pub mod validator { + use super::*; -// Get all of the shares for a cluster -// Get all shares for a cluster -pub fn get_shares_from_db( - db: &NetworkDatabase, - cluster_id: ClusterId, -) -> Vec<(String, i64, i64, Option<String>)> { - let conn = db.connection().unwrap(); - let mut stmt = conn - .prepare("SELECT validator_pubkey, cluster_id, operator_id, share_pubkey FROM shares WHERE cluster_id = ?1") - .unwrap(); - let shares = stmt - .query_map(params![*cluster_id], |row| { - Ok(( - row.get(0).unwrap(), - row.get(1).unwrap(), - row.get(2).unwrap(), - row.get(3).unwrap(), - )) - }) - .unwrap() - .map(|r| r.unwrap()) - .collect(); - shares + pub fn random_metadata() -> ValidatorMetadata { + ValidatorMetadata { + validator_index: ValidatorIndex(rand::thread_rng().gen()), + validator_pubkey: pubkey::random(), + fee_recipient: Address::random(), + graffiti: Graffiti::default(), + owner: Address::random(), + } + } + } } -// Get validator metadata from the database -pub fn get_validator_from_db(db: &NetworkDatabase, pubkey: &str) -> Option<(String, i64)> { - let conn = db.connection().unwrap(); - let mut stmt = conn - .prepare("SELECT validator_pubkey, cluster_id FROM validators WHERE validator_pubkey = ?1") - .unwrap(); - stmt.query_row(params![pubkey], |row| Ok((row.get(0)?, row.get(1)?))) - .optional() - .unwrap() -} +/// Database queries for testing +pub mod queries { + use super::*; + + pub fn get_operator(db: &NetworkDatabase, id: OperatorId) -> Option<Operator> { + let conn = db.connection().unwrap(); + let operators = conn.prepare("SELECT
operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1") + .unwrap() + .query_row(params![*id], |row| Ok(row.try_into().unwrap())) + .ok(); + operators + } + + pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { + let conn = db.connection().unwrap(); + let cluster = conn + .prepare("SELECT cluster_id, faulty, liquidated FROM clusters WHERE cluster_id = ?1") + .unwrap() + .query_row(params![*id], |row| { + Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + }) + .optional() + .unwrap(); + cluster + } - // Get a ClusterMember from the database -pub fn get_cluster_member_from_db( - db: &NetworkDatabase, - cluster_id: ClusterId, - operator_id: OperatorId, -) -> Option<(i64, i64)> { - let conn = db.connection().unwrap(); - let mut stmt = conn.prepare("SELECT cluster_id, operator_id FROM cluster_members WHERE cluster_id = ?1 AND operator_id = ?2").unwrap(); - let member_row: Option<(i64, i64)> = stmt - .query_row(params![*cluster_id, *operator_id], |row| { - Ok((row.get(0)?, row.get(1)?)) - }) - .optional() - .unwrap(); - member_row + pub fn get_shares( + db: &NetworkDatabase, + cluster_id: ClusterId, + ) -> Vec<(String, i64, i64, Option<String>)> { + let conn = db.connection().unwrap(); + + let mut stmt = conn + .prepare("SELECT validator_pubkey, cluster_id, operator_id, share_pubkey FROM shares WHERE cluster_id = ?1") + .unwrap(); + let shares = stmt + .query_map(params![*cluster_id], |row| { + Ok(( + row.get(0).unwrap(), + row.get(1).unwrap(), + row.get(2).unwrap(), + row.get(3).unwrap(), + )) + }) + .unwrap() + .map(|r| r.unwrap()) + .collect(); + shares + } + + pub fn get_cluster_member( + db: &NetworkDatabase, + cluster_id: ClusterId, + operator_id: OperatorId, + ) -> Option<(i64, i64)> { + let conn = db.connection().unwrap(); + let member = conn.prepare("SELECT cluster_id, operator_id FROM cluster_members WHERE cluster_id = ?1 AND operator_id = ?2") + .unwrap() + .query_row(params![*cluster_id, *operator_id], |row| Ok((row.get(0)?, row.get(1)?))) + .optional() + .unwrap(); + member + } + + pub fn get_validator( + db: &NetworkDatabase, + validator_pubkey: &str, + ) -> Option<(String, i64, String)> { + let conn = db.connection().unwrap(); + let validator = conn.prepare("SELECT validator_pubkey, cluster_id, owner FROM validators WHERE validator_pubkey = ?1") + .unwrap() + .query_row(params![validator_pubkey], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?))) + .optional() + .unwrap(); + validator + } } -// Debug print the entire database.
For testing purposes -pub fn debug_print_db(db: &NetworkDatabase) { - let conn = db.connection().unwrap(); - - println!("\n=== CLUSTERS ==="); - let mut stmt = conn.prepare("SELECT * FROM clusters").unwrap(); - let clusters = stmt - .query_map([], |row| { - Ok(format!( - "Cluster ID: {}, Faulty: {}, Liquidated: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, bool>(2).unwrap() - )) - }) - .unwrap(); - for cluster in clusters { - println!("{}", cluster.unwrap()); +/// Database assertions for testing +pub mod assertions { + use super::*; + + pub fn assert_operator_exists_fully(db: &NetworkDatabase, operator: &Operator) { + // Check in-memory state + let fetched = db + .get_operator(&operator.id) + .expect("Operator not found in memory"); + + assert_eq!(fetched.id, operator.id, "Operator ID mismatch in memory"); + assert_eq!( + fetched.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch in memory" + ); + assert_eq!( + fetched.owner, operator.owner, + "Operator owner mismatch in memory" + ); + + // Check database state + let db_operator = + queries::get_operator(db, operator.id).expect("Operator not found in database"); + + assert_eq!( + db_operator.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch in database" + ); + assert_eq!( + db_operator.id, operator.id, + "Operator ID mismatch in database" + ); + assert_eq!( + db_operator.owner, operator.owner, + "Operator owner mismatch in database" + ); } - println!("\n=== OPERATORS ==="); - let mut stmt = conn.prepare("SELECT * FROM operators").unwrap(); - let operators = stmt - .query_map([], |row| { - Ok(format!( - "Operator ID: {}, PublicKey: {}, Owner: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, String>(1).unwrap(), - row.get::<_, String>(2).unwrap() - )) - }) - .unwrap(); - for operator in operators { - println!("{}", operator.unwrap()); + pub fn assert_operator_not_exists_fully(db: &NetworkDatabase, operator_id: OperatorId) { + // Check memory + assert!( + db.get_operator(&operator_id).is_none(), + "Operator still exists in memory" + ); + + // Check database + assert!( + queries::get_operator(db, operator_id).is_none(), + "Operator still exists in database" + ); } - println!("\n=== CLUSTER MEMBERS ==="); - let mut stmt = conn.prepare("SELECT * FROM cluster_members").unwrap(); - let members = stmt - .query_map([], |row| { - Ok(format!( - "Cluster ID: {}, Operator ID: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, i64>(1).unwrap() - )) - }) - .unwrap(); - for member in members { - println!("{}", member.unwrap()); + /// Verifies that a cluster exists and all its data is correctly stored + pub fn assert_cluster_exists_fully(db: &NetworkDatabase, cluster: &Cluster) { + // Check cluster base data + let (id, faulty, liquidated) = + queries::get_cluster(db, cluster.cluster_id).expect("Cluster not found in database"); + + assert_eq!(id as u64, *cluster.cluster_id, "Cluster ID mismatch"); + assert_eq!( + faulty as u64, cluster.faulty, + "Cluster faulty count mismatch" + ); + assert_eq!( + liquidated, cluster.liquidated, + "Cluster liquidated status mismatch" + ); + + // Verify cluster is in memory if we're a member + if let Some(our_id) = db.state.id { + if cluster + .cluster_members + .iter() + .any(|m| m.operator_id == our_id) + { + assert!( + db.state.clusters.contains(&cluster.cluster_id), + "Cluster not found in memory state" + ); + assert_eq!( + 
db.state.cluster_members[&cluster.cluster_id].len(), + cluster.cluster_members.len(), + "Cluster members count mismatch in memory" + ); + } + } + + // Verify cluster members + for member in &cluster.cluster_members { + let member_exists = + queries::get_cluster_member(db, member.cluster_id, member.operator_id) + .expect("Cluster member not found in database"); + + assert_eq!( + member_exists.0 as u64, *member.cluster_id, + "Cluster member cluster ID mismatch" + ); + assert_eq!( + member_exists.1 as u64, *member.operator_id, + "Cluster member operator ID mismatch" + ); + } + + // Verify shares + let shares = queries::get_shares(db, cluster.cluster_id); + assert!(!shares.is_empty(), "No shares found for cluster"); + + // Verify validator metadata + let validator = + queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) + .expect("Validator not found in database"); + + assert_eq!( + validator.0, + cluster.validator_metadata.validator_pubkey.to_string(), + "Validator pubkey mismatch" + ); + assert_eq!( + validator.1 as u64, *cluster.cluster_id, + "Validator cluster ID mismatch" + ); + assert_eq!( + validator.2, + cluster.validator_metadata.owner.to_string(), + "Validator owner mismatch" + ); } - println!("\n=== VALIDATORS ==="); - let mut stmt = conn.prepare("SELECT * FROM validators").unwrap(); - let validators = stmt - .query_map([], |row| { - Ok(format!( - "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Owner: {:?}, Graffiti: {:?}, Index: {:?}", - row.get::<_, String>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, Option<String>>(2).unwrap(), - row.get::<_, Option<String>>(3).unwrap(), - row.get::<_, Vec<u8>>(4).unwrap(), - row.get::<_, Option<i64>>(5).unwrap() - )) - }) - .unwrap(); - for validator in validators { - println!("{}", validator.unwrap()); + /// Verifies that a cluster does not exist in any form + pub fn assert_cluster_exists_not_fully(db: &NetworkDatabase, cluster: &Cluster) { + // Verify cluster base data is gone + assert!( + queries::get_cluster(db, cluster.cluster_id).is_none(), + "Cluster still exists in database" + ); + + // Verify cluster is not in memory + assert!( + !db.state.clusters.contains(&cluster.cluster_id), + "Cluster still exists in memory state" + ); + assert!( + !db.state.cluster_members.contains_key(&cluster.cluster_id), + "Cluster members still exist in memory state" + ); + + // Verify all cluster members are gone + for member in &cluster.cluster_members { + assert!( + queries::get_cluster_member(db, member.cluster_id, member.operator_id).is_none(), + "Cluster member still exists in database" + ); + } + + // Verify all shares are gone + let shares = queries::get_shares(db, cluster.cluster_id); + assert!(shares.is_empty(), "Shares still exist for cluster"); + + // Verify validator metadata is gone + assert!( + queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) + .is_none(), + "Validator still exists in database" + ); + assert!( + !db.state + .validator_metadata + .contains_key(&cluster.cluster_id), + "Validator metadata still exists in memory state" + ); } +} + +pub mod debug { + use super::*; + pub fn debug_print_db(db: &NetworkDatabase) { + let conn = db.connection().unwrap(); + + println!("\n=== CLUSTERS ==="); + let mut stmt = conn.prepare("SELECT * FROM clusters").unwrap(); + let clusters = stmt + .query_map([], |row| { + Ok(format!( + "Cluster ID: {}, Faulty: {}, Liquidated: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, bool>(2).unwrap() + )) + }) + 
.unwrap(); + for cluster in clusters { + println!("{}", cluster.unwrap()); + } + + println!("\n=== OPERATORS ==="); + let mut stmt = conn.prepare("SELECT * FROM operators").unwrap(); + let operators = stmt + .query_map([], |row| { + Ok(format!( + "Operator ID: {}, PublicKey: {}, Owner: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, String>(1).unwrap(), + row.get::<_, String>(2).unwrap() + )) + }) + .unwrap(); + for operator in operators { + println!("{}", operator.unwrap()); + } + + println!("\n=== CLUSTER MEMBERS ==="); + let mut stmt = conn.prepare("SELECT * FROM cluster_members").unwrap(); + let members = stmt + .query_map([], |row| { + Ok(format!( + "Cluster ID: {}, Operator ID: {}", + row.get::<_, i64>(0).unwrap(), + row.get::<_, i64>(1).unwrap() + )) + }) + .unwrap(); + for member in members { + println!("{}", member.unwrap()); + } + + println!("\n=== VALIDATORS ==="); + let mut stmt = conn.prepare("SELECT * FROM validators").unwrap(); + let validators = stmt + .query_map([], |row| { + Ok(format!( + "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Owner: {:?}, Graffiti: {:?}, Index: {:?}", + row.get::<_, String>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, Option<String>>(2).unwrap(), + row.get::<_, Option<String>>(3).unwrap(), + row.get::<_, Vec<u8>>(4).unwrap(), + row.get::<_, Option<i64>>(5).unwrap() + )) + }) + .unwrap(); + for validator in validators { + println!("{}", validator.unwrap()); + } - println!("\n=== SHARES ==="); - let mut stmt = conn.prepare("SELECT * FROM shares").unwrap(); - let shares = stmt - .query_map([], |row| { - Ok(format!( - "Validator Pubkey: {}, Cluster ID: {}, Operator ID: {}, Share Pubkey: {:?}", - row.get::<_, String>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, i64>(2).unwrap(), - row.get::<_, Option<String>>(3).unwrap() - )) - }) - .unwrap(); - for share in shares { - println!("{}", share.unwrap()); + println!("\n=== SHARES ==="); + let mut stmt = conn.prepare("SELECT * FROM shares").unwrap(); + let shares = stmt + ..query_map([], |row| { + Ok(format!( + "Validator Pubkey: {}, Cluster ID: {}, Operator ID: {}, Share Pubkey: {:?}", + row.get::<_, String>(0).unwrap(), + row.get::<_, i64>(1).unwrap(), + row.get::<_, i64>(2).unwrap(), + row.get::<_, Option<String>>(3).unwrap() + )) + }) + .unwrap(); + for share in shares { + println!("{}", share.unwrap()); + } } } diff --git a/anchor/network/Cargo.toml b/anchor/network/Cargo.toml index 73801a6a..3c8bac68 100644 --- a/anchor/network/Cargo.toml +++ b/anchor/network/Cargo.toml @@ -17,4 +17,4 @@ serde = { workspace = true } tracing = { workspace = true } [dev-dependencies] -async-channel = { workspace = true } \ No newline at end of file +async-channel = { workspace = true } From 05650ab0ce219d8ec70784a9bee00132475a7354 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 11 Dec 2024 20:37:36 +0000 Subject: [PATCH 16/50] removed unused code --- anchor/database/src/tests/utils.rs | 96 +----------------------------- 1 file changed, 1 insertion(+), 95 deletions(-) diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index bcb90175..81343650 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -61,7 +61,7 @@ impl TestFixture { Self { db, - cluster: generators::cluster::random(0), // Empty cluster + cluster: generators::cluster::random(0), operators: Vec::new(), path: db_path, _temp_dir: temp_dir, @@ -76,10 +76,6 @@ pub mod generators { pub mod operator { use super::*; - pub fn random() -> Operator { - with_id(rand::thread_rng().gen()) - } - pub 
fn with_id(id: u64) -> Operator { let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key"); let public_key = priv_key @@ -425,93 +421,3 @@ pub mod assertions { ); } } - -pub mod debug { - use super::*; - pub fn debug_print_db(db: &NetworkDatabase) { - let conn = db.connection().unwrap(); - - println!("\n=== CLUSTERS ==="); - let mut stmt = conn.prepare("SELECT * FROM clusters").unwrap(); - let clusters = stmt - .query_map([], |row| { - Ok(format!( - "Cluster ID: {}, Faulty: {}, Liquidated: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, bool>(2).unwrap() - )) - }) - .unwrap(); - for cluster in clusters { - println!("{}", cluster.unwrap()); - } - - println!("\n=== OPERATORS ==="); - let mut stmt = conn.prepare("SELECT * FROM operators").unwrap(); - let operators = stmt - .query_map([], |row| { - Ok(format!( - "Operator ID: {}, PublicKey: {}, Owner: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, String>(1).unwrap(), - row.get::<_, String>(2).unwrap() - )) - }) - .unwrap(); - for operator in operators { - println!("{}", operator.unwrap()); - } - - println!("\n=== CLUSTER MEMBERS ==="); - let mut stmt = conn.prepare("SELECT * FROM cluster_members").unwrap(); - let members = stmt - .query_map([], |row| { - Ok(format!( - "Cluster ID: {}, Operator ID: {}", - row.get::<_, i64>(0).unwrap(), - row.get::<_, i64>(1).unwrap() - )) - }) - .unwrap(); - for member in members { - println!("{}", member.unwrap()); - } - - println!("\n=== VALIDATORS ==="); - let mut stmt = conn.prepare("SELECT * FROM validators").unwrap(); - let validators = stmt - .query_map([], |row| { - Ok(format!( - "Pubkey: {}, Cluster ID: {}, Fee Recipient: {:?}, Owner: {:?}, Graffiti: {:?}, Index: {:?}", - row.get::<_, String>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, Option<String>>(2).unwrap(), - row.get::<_, Option<String>>(3).unwrap(), - row.get::<_, Vec<u8>>(4).unwrap(), - row.get::<_, Option<i64>>(5).unwrap() - )) - }) - .unwrap(); - for validator in validators { - println!("{}", validator.unwrap()); - } - - println!("\n=== SHARES ==="); - let mut stmt = conn.prepare("SELECT * FROM shares").unwrap(); - let shares = stmt - .query_map([], |row| { - Ok(format!( - "Validator Pubkey: {}, Cluster ID: {}, Operator ID: {}, Share Pubkey: {:?}", - row.get::<_, String>(0).unwrap(), - row.get::<_, i64>(1).unwrap(), - row.get::<_, i64>(2).unwrap(), - row.get::<_, Option<String>>(3).unwrap() - )) - }) - .unwrap(); - for share in shares { - println!("{}", share.unwrap()); - } - } -} From 6a399982649e8b8824aa5a3fc36b067d80c15c6e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 11 Dec 2024 22:24:01 +0000 Subject: [PATCH 17/50] validator tests --- anchor/database/src/state.rs | 34 +++++++++- anchor/database/src/tests/mod.rs | 1 + anchor/database/src/tests/operator_tests.rs | 10 +++ anchor/database/src/tests/utils.rs | 6 +- anchor/database/src/tests/validator_tests.rs | 69 ++++++++++++++++++++ anchor/database/src/validator_operations.rs | 45 ++++++++++--- 6 files changed, 150 insertions(+), 15 deletions(-) create mode 100644 anchor/database/src/tests/validator_tests.rs diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index da7c79f8..d5cff8e6 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -1,11 +1,13 @@ use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn, SqlStatement, SQL}; use ssv_types::{ - Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorMetadata, + Cluster, ClusterId, ClusterMember,
Operator, OperatorId, Share, ValidatorIndex, + ValidatorMetadata, }; use std::collections::{HashMap, HashSet}; +use types::Address; impl NetworkState { - // Main constructor that builds the network state from the database data + /// Build the network state from the database data pub(crate) fn new_with_state(conn_pool: &Pool, id: OperatorId) -> Result<Self, DatabaseError> { // Get database connection from the pool let conn = conn_pool.get()?; @@ -125,8 +127,36 @@ impl NetworkDatabase { self.state.clusters.contains(id) } + /// Get our own share of the key for a Cluster we are a member of + pub fn get_share(&self, id: &ClusterId) -> Option<&Share> { + self.state.shares.get(id) + } + /// Set the id of our own operator pub fn set_own_id(&mut self, id: OperatorId) { self.state.id = Some(id); } + + /// Get the metadata for the cluster + pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<&ValidatorMetadata> { + self.state.validator_metadata.get(id) + } + + /// Get the Fee Recipient address + pub fn get_fee_recipient(&self, id: &ClusterId) -> Option<Address>
{ + if let Some(metadata) = self.state.validator_metadata.get(id) { + Some(metadata.fee_recipient) + } else { + None + } + } + + /// Get the Validator Index + pub fn get_validator_index(&self, id: &ClusterId) -> Option<ValidatorIndex> { + if let Some(metadata) = self.state.validator_metadata.get(id) { + Some(metadata.validator_index) + } else { + None + } + } } diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index eeb59795..22a2c887 100644 --- a/anchor/database/src/tests/mod.rs +++ b/anchor/database/src/tests/mod.rs @@ -2,6 +2,7 @@ mod cluster_tests; mod operator_tests; mod state_tests; mod utils; +mod validator_tests; pub mod test_prelude { pub use super::utils::*; diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs index 04767b19..101c69b6 100644 --- a/anchor/database/src/tests/operator_tests.rs +++ b/anchor/database/src/tests/operator_tests.rs @@ -89,4 +89,14 @@ assertions::assert_operator_not_exists_fully(&fixture.db, operator.id); } } + + #[test] + /// Try to delete an operator that does not exist + fn test_delete_dne_operator() { + let mut fixture = TestFixture::new_empty(); + fixture + .db + .delete_operator(OperatorId(1)) + .expect_err("Deletion should fail. Operator DNE"); + } } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 81343650..5dcb6f63 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -236,11 +236,11 @@ pub fn get_validator( db: &NetworkDatabase, validator_pubkey: &str, - ) -> Option<(String, i64, String)> { + ) -> Option<(String, i64, String, String, i64)> { let conn = db.connection().unwrap(); - let validator = conn.prepare("SELECT validator_pubkey, cluster_id, owner FROM validators WHERE validator_pubkey = ?1") + let validator = conn.prepare("SELECT validator_pubkey, cluster_id, owner, fee_recipient, validator_index FROM validators WHERE validator_pubkey = ?1") .unwrap() - .query_row(params![validator_pubkey], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?))) + .query_row(params![validator_pubkey], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?))) .optional() .unwrap(); validator diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs new file mode 100644 index 00000000..d4d2d894 --- /dev/null +++ b/anchor/database/src/tests/validator_tests.rs @@ -0,0 +1,69 @@ +use super::test_prelude::*; + +#[cfg(test)] +mod validator_database_tests { + use super::*; + use types::Address; + + #[test] + /// Test updating the fee recipient address + fn test_update_fee_recipient() { + let mut fixture = TestFixture::new(Some(1)); + + let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; + let updated_fee_recipient = Address::random(); + let cluster_id = fixture.cluster.cluster_id; + fixture + .db + .update_fee_recipient(cluster_id, validator_pubkey.clone(), updated_fee_recipient) + .expect("Failed to update fee recipient"); + + // make sure the state store has changed, then check the db + assert_eq!( + updated_fee_recipient, + fixture + .db + .get_fee_recipient(&cluster_id) + .expect("Failed to get fee recipient") + ); + assert_eq!( + updated_fee_recipient.to_string(), + queries::get_validator(&fixture.db, &(validator_pubkey.to_string())) + .expect("Failed to fetch Validator") + .3 + ); + } + + #[test] + /// Test setting the validator index + fn test_set_validator_index() { + let mut fixture =
TestFixture::new(Some(1)); + + let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; + let updated_validator_index = ValidatorIndex(10); + let cluster_id = fixture.cluster.cluster_id; + fixture + .db + .set_validator_index( + cluster_id, + validator_pubkey.clone(), + updated_validator_index, + ) + .expect("Failed to update validator index"); + + // make sure the state store has changed, then check the db + assert_eq!( + updated_validator_index, + fixture + .db + .get_validator_index(&cluster_id) + .expect("Failed to get validator index") + ); + assert_eq!( + *updated_validator_index as i64, + queries::get_validator(&fixture.db, &(validator_pubkey.to_string())) + .expect("Failed to fetch Validator") + .4 + ); + } } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 40154112..33206eb7 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -1,39 +1,64 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::params; -use ssv_types::{ClusterId, ValidatorIndex, ValidatorMetadata}; +use ssv_types::{ClusterId, ValidatorIndex}; use types::{Address, PublicKey}; /// Implements all validator related db functionality impl NetworkDatabase { - /// Populates or updates the fee recipient for the validator + /// Update the fee recipient address for a validator pub fn update_fee_recipient( &mut self, + cluster_id: ClusterId, validator_pubkey: PublicKey, fee_recipient: Address, ) -> Result<(), DatabaseError> { + // Make sure we are part of the cluster for this Validator + if !self.state.clusters.contains(&cluster_id) { + return Err(DatabaseError::NotFound(format!( + "Validator for Cluster {} not in database", + *cluster_id + ))); + } + let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])? .execute(params![ - validator_pubkey.to_string(), - fee_recipient.to_string() + fee_recipient.to_string(), + validator_pubkey.to_string() ])?; + let metadata = self + .state + .validator_metadata + .get_mut(&cluster_id) + .expect("Cluster should exist"); + metadata.fee_recipient = fee_recipient; Ok(()) } /// Set the index of the validator pub fn set_validator_index( &mut self, + cluster_id: ClusterId, validator_pubkey: PublicKey, index: ValidatorIndex, ) -> Result<(), DatabaseError> { + // Make sure we are part of the cluster for this validator + if !self.state.clusters.contains(&cluster_id) { + return Err(DatabaseError::NotFound(format!( + "Validator for Cluster {} not in database", + *cluster_id + ))); + } + + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::SetValidatorIndex])? 
- .execute(params![validator_pubkey.to_string(), *index])?; + .execute(params![*index, validator_pubkey.to_string()])?; + let metadata = self + .state + .validator_metadata + .get_mut(&cluster_id) + .expect("Cluster should exist"); + metadata.validator_index = index; Ok(()) } - - /// Get the metatdata for the cluster - pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<&ValidatorMetadata> { - self.state.validator_metadata.get(id) - } } From 2c84a52f47ee01f9a90dd4bb70633ee7691fc77c Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Wed, 11 Dec 2024 22:33:55 +0000 Subject: [PATCH 18/50] clippy fix --- anchor/database/src/state.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index d5cff8e6..6bfdd938 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -144,19 +144,17 @@ impl NetworkDatabase { /// Get the Fee Recipient address pub fn get_fee_recipient(&self, id: &ClusterId) -> Option<Address>
{ - if let Some(metadata) = self.state.validator_metadata.get(id) { - Some(metadata.fee_recipient) - } else { - None - } + self.state + .validator_metadata + .get(id) + .map(|metadata| metadata.fee_recipient) } /// Get the Validator Index pub fn get_validator_index(&self, id: &ClusterId) -> Option<ValidatorIndex> { - if let Some(metadata) = self.state.validator_metadata.get(id) { - Some(metadata.validator_index) - } else { - None - } + self.state + .validator_metadata + .get(id) + .map(|metadata| metadata.validator_index) } } From 60438e2b074dae2e8cff3ef4d433265eb1c1cd4f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 12 Dec 2024 16:01:03 +0000 Subject: [PATCH 19/50] database with pubkey --- anchor/database/src/lib.rs | 28 ++++++++----- anchor/database/src/operator_operations.rs | 38 +++++++++-------- anchor/database/src/state.rs | 36 +++++++++++++++- anchor/database/src/tests/cluster_tests.rs | 4 +- anchor/database/src/tests/mod.rs | 3 +- anchor/database/src/tests/state_tests.rs | 8 ++-- anchor/database/src/tests/utils.rs | 44 ++++++++++++-------- anchor/database/src/tests/validator_tests.rs | 5 ++- 8 files changed, 111 insertions(+), 55 deletions(-) diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 482c47b6..0b5a9b62 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,7 +1,7 @@ pub use crate::error::DatabaseError; +use openssl::{pkey::Public, rsa::Rsa}; use r2d2_sqlite::SqliteConnectionManager; -use ssv_types::{ClusterId, ValidatorMetadata}; -use ssv_types::{Operator, OperatorId, Share} +use ssv_types::{ClusterId, Operator, OperatorId, Share, ValidatorMetadata}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::Path; @@ -45,6 +45,8 @@ struct NetworkState { /// to relevant information and a connection to the database #[derive(Debug, Clone)] pub struct NetworkDatabase { + /// The public key of our operator + pubkey: Rsa<Public>, /// Custom state stores for easy data access state: NetworkState, /// Connection to the database @@ -52,15 +54,15 @@ pub struct NetworkDatabase { } impl NetworkDatabase { - /// Construct a new NetworkDatabase at the given path and with the OperatorID if registered - pub fn new(path: &Path, id: Option<OperatorId>) -> Result<Self, DatabaseError> { + /// Construct a new NetworkDatabase at the given path and the Public Key of our operator. + pub fn new(path: &Path, pubkey: &Rsa<Public>) -> Result<Self, DatabaseError> { let conn_pool = Self::open_or_create(path)?; - let state = if let Some(id) = id { - NetworkState::new_with_state(&conn_pool, id)? - } else { - NetworkState::default() - }; - Ok(Self { state, conn_pool }) + let state = NetworkState::new_with_state(&conn_pool, pubkey)?; + Ok(Self { + pubkey: pubkey.clone(), + state, + conn_pool, + }) } // Open an existing database at the given `path`, or create one if none exists.
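(For orientation: after this change a caller opens the database with its operator's RSA public key rather than an Option of its OperatorId. A minimal sketch of the new contract, assuming the crate is named `database` and using the same openssl calls the tests use; the path and key below are made up for illustration:

    use database::NetworkDatabase; // assumption: crate name matches the directory
    use openssl::rsa::Rsa;
    use std::path::Path;

    fn open_db_example() -> Result<(), Box<dyn std::error::Error>> {
        // Generate a keypair and keep only the public half, mirroring the test helpers
        let keypair = Rsa::generate(2048)?;
        let pubkey = Rsa::public_key_from_pem(&keypair.public_key_to_pem()?)?;
        // The database resolves our OperatorId itself once an operator with this key exists
        let _db = NetworkDatabase::new(Path::new("anchor.sqlite"), &pubkey)?;
        Ok(())
    }

The design choice here is that identity discovery becomes the database's job: a node no longer needs to know whether it is registered before opening its store.)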
@@ -111,6 +113,7 @@ impl NetworkDatabase { pub(crate) enum SqlStatement { InsertOperator, DeleteOperator, + GetOperatorId, GetAllOperators, InsertCluster, @@ -138,7 +141,10 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: SqlStatement::DeleteOperator, "DELETE FROM operators WHERE operator_id = ?1", ); - + m.insert( + SqlStatement::GetOperatorId, + "SELECT operator_id FROM operators WHERE public_key = ?1", + ); m.insert(SqlStatement::GetAllOperators, "SELECT * FROM operators"); m.insert( SqlStatement::InsertCluster, diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index b56d62e9..a9d0019f 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -1,7 +1,5 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use base64::prelude::*; -use openssl::pkey::Public; -use openssl::rsa::Rsa; use rusqlite::params; use ssv_types::{Operator, OperatorId}; @@ -18,14 +16,30 @@ impl NetworkDatabase { ))); } + // Check if this operator is us + if self.state.id.is_none() { + let keys_match = operator + .rsa_pubkey + .public_key_to_pem() + .and_then(|key1| self.pubkey.public_key_to_pem().map(|key2| key1 == key2)) + .unwrap_or(false); + if keys_match { + self.state.id = Some(operator.id); + } + } + + // encode the key + let encoded = BASE64_STANDARD.encode( + operator + .rsa_pubkey + .public_key_to_pem() + .expect("Failed to encode RsaPublicKey"), + ); + + // Insert into the database, then store in memory let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::InsertOperator])? - .execute(params![ - *operator.id, - Self::encode_pubkey(&operator.rsa_pubkey), - operator.owner.to_string() - ])?; + .execute(params![*operator.id, encoded, operator.owner.to_string()])?; self.state.operators.insert(operator.id, operator.clone()); Ok(()) } @@ -50,14 +64,4 @@ impl NetworkDatabase { self.state.operators.remove(&id); Ok(()) } - - // Helper to encode the RsaPublicKey to PEM - fn encode_pubkey(pubkey: &Rsa<Public>) -> String { - // this should never fail as the key has already been validated upon construction - BASE64_STANDARD.encode( - pubkey - .public_key_to_pem() - .expect("Failed to encode RsaPublicKey"), - ) - } } diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 6bfdd938..783ecec9 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -1,4 +1,8 @@ use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn, SqlStatement, SQL}; +use base64::prelude::*; +use openssl::pkey::Public; +use openssl::rsa::Rsa; +use rusqlite::{params, OptionalExtension}; use ssv_types::{ Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex, ValidatorMetadata, }; @@ -8,10 +12,24 @@ use types::Address; impl NetworkState { /// Build the network state from the database data - pub(crate) fn new_with_state(conn_pool: &Pool, id: OperatorId) -> Result<Self, DatabaseError> { + pub(crate) fn new_with_state( + conn_pool: &Pool, + pubkey: &Rsa<Public>, + ) -> Result<Self, DatabaseError> { // Get database connection from the pool let conn = conn_pool.get()?; + // Without an Id, we have no idea who we are. Check to see if an operator with our PublicKey
+ // is stored in the database, else we have to wait for it to be processed by the execution + // layer + let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { + operator_id + } else { + // If it does not exist, just default the state + println!("does not exist"); + return Ok(Self::default()); + }; + // First Phase: Fetch data from the database // Get all of the operators from the network let operators = Self::fetch_operators(&conn)?; @@ -60,6 +78,22 @@ impl NetworkState { }) } + // Check to see if an operator with the public key already exists in the database + fn does_self_exist( + conn: &PoolConn, + pubkey: &Rsa<Public>, + ) -> Result<Option<OperatorId>, DatabaseError> { + let encoded = BASE64_STANDARD.encode( + pubkey + .public_key_to_pem() + .expect("Failed to encode RsaPublicKey"), + ); + let mut stmt = conn.prepare(SQL[&SqlStatement::GetOperatorId])?; + stmt.query_row(params![encoded], |row| Ok(OperatorId(row.get(0)?))) + .optional() + .map_err(DatabaseError::from) + } + // Fetch and transform operator data from database fn fetch_operators(conn: &PoolConn) -> Result<HashMap<OperatorId, Operator>, DatabaseError> { let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllOperators])?; diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 81eb2d1b..1ea5312c 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -7,7 +7,7 @@ mod cluster_database_tests { #[test] // Test inserting a cluster into the database fn test_insert_retrieve_cluster() { - let fixture = TestFixture::new(Some(1)); + let fixture = TestFixture::new(); assertions::assert_cluster_exists_fully(&fixture.db, &fixture.cluster); } @@ -25,7 +25,7 @@ mod cluster_database_tests { #[test] // Test deleting a cluster and make sure that it is properly cleaned up fn test_delete_cluster() { - let mut fixture = TestFixture::new(Some(1)); + let mut fixture = TestFixture::new(); fixture .db .delete_cluster(fixture.cluster.cluster_id) diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index 22a2c887..a5db2e43 100644 --- a/anchor/database/src/tests/mod.rs +++ b/anchor/database/src/tests/mod.rs @@ -19,7 +19,8 @@ mod database_test { fn test_create_database() { let dir = tempdir().unwrap(); let file = dir.path().join("db.sqlite"); - let db = NetworkDatabase::new(&file, None); + let pubkey = generators::pubkey::random_rsa(); + let db = NetworkDatabase::new(&file, &pubkey); assert!(db.is_ok()); } } diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 05fd9ff6..4f307d93 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -8,11 +8,11 @@ mod state_database_tests { // Make sure all of the previously inserted operators are present after restart fn test_operator_store() { // Create new test fixture with populated DB - let mut fixture = TestFixture::new(Some(1)); + let mut fixture = TestFixture::new(); // drop the database and then recreate it drop(fixture.db); - fixture.db = NetworkDatabase::new(&fixture.path, Some(OperatorId(1))) + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) .expect("Failed to create database"); // confirm that all of the operators exist were @@ -24,12 +24,12 @@ mod state_database_tests { #[test] fn test_cluster_after_restart() { // Create new test fixture with populated DB - let mut fixture = TestFixture::new(Some(1)); + let mut fixture = TestFixture::new(); let cluster = fixture.cluster;
// drop the database and then recreate it drop(fixture.db); - fixture.db = NetworkDatabase::new(&fixture.path, Some(OperatorId(1))) + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) .expect("Failed to create database"); // Confirm all cluster related data is still correct diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 5dcb6f63..df806e62 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -1,4 +1,5 @@ use super::test_prelude::*; +use openssl::pkey::Public; use openssl::rsa::Rsa; use rand::Rng; use rusqlite::{params, OptionalExtension}; @@ -18,23 +19,25 @@ pub struct TestFixture { pub cluster: Cluster, pub operators: Vec<Operator>, pub path: PathBuf, + pub pubkey: Rsa<Public>, _temp_dir: TempDir, } impl TestFixture { - pub fn new(id: Option<u64>) -> Self { - let temp_dir = TempDir::new().expect("Failed to create temporary directory"); - let db_path = temp_dir.path().join("test.db"); - let mut db = if let Some(id) = id { - NetworkDatabase::new(&db_path, Some(OperatorId(id))) - .expect("Failed to create test database") - } else { - NetworkDatabase::new(&db_path, None).expect("Failed to create test database") - }; - + pub fn new() -> Self { + // Generate the operators first so we can pick one to be us let operators: Vec<Operator> = (0..DEFAULT_NUM_OPERATORS) .map(generators::operator::with_id) .collect(); + let us = operators + .first() + .expect("Failed to get operator") + .rsa_pubkey + .clone(); + + let temp_dir = TempDir::new().expect("Failed to create temporary directory"); + let db_path = temp_dir.path().join("test.db"); + let mut db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB"); operators.iter().for_each(|op| { db.insert_operator(op).expect("Failed to insert operator"); @@ -43,12 +46,14 @@ impl TestFixture { let cluster = generators::cluster::with_operators(&operators); db.insert_cluster(cluster.clone()) .expect("Failed to insert cluster"); + println!("{:?}", db); Self { db, cluster, operators, path: db_path, + pubkey: us, _temp_dir: temp_dir, } } @@ -56,14 +61,16 @@ impl TestFixture { pub fn new_empty() -> Self { let temp_dir = TempDir::new().expect("Failed to create temporary directory"); let db_path = temp_dir.path().join("test.db"); + let pubkey = generators::pubkey::random_rsa(); - let db = NetworkDatabase::new(&db_path, None).expect("Failed to create test database"); + let db = NetworkDatabase::new(&db_path, &pubkey).expect("Failed to create test database"); Self { db, cluster: generators::cluster::random(0), operators: Vec::new(), path: db_path, + pubkey, _temp_dir: temp_dir, } } @@ -77,12 +84,7 @@ pub mod generators { use super::*; pub fn with_id(id: u64) -> Operator { - let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key"); - let public_key = priv_key - .public_key_to_pem() - .and_then(|pem| Rsa::public_key_from_pem(&pem)) - .expect("Failed to process RSA key"); - + let public_key = generators::pubkey::random_rsa(); Operator::new_with_pubkey(public_key, OperatorId(id), Address::random()) } } @@ -148,6 +150,14 @@ pub mod generators { pub mod pubkey { use super::*; + pub fn random_rsa() -> Rsa<Public> { + let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key"); + priv_key + .public_key_to_pem() + .and_then(|pem| Rsa::public_key_from_pem(&pem)) + .expect("Failed to process RSA key") + } + pub fn random() -> PublicKey { let rng = &mut XorShiftRng::from_seed(DEFAULT_SEED); PublicKey::random_for_test(rng)
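(The self-identification above rests on one invariant: two Rsa<Public> handles denote the same operator exactly when their PEM encodings are byte-identical. A standalone sketch of that comparison, illustrative only, with `a` and `b` as hypothetical inputs:

    use openssl::{pkey::Public, rsa::Rsa};

    fn same_operator_key(a: &Rsa<Public>, b: &Rsa<Public>) -> bool {
        match (a.public_key_to_pem(), b.public_key_to_pem()) {
            (Ok(pem_a), Ok(pem_b)) => pem_a == pem_b,
            // Mirrors the patch's unwrap_or(false): an encoding failure means "not us"
            _ => false,
        }
    }

This is the same comparison insert_operator performs inline, and does_self_exist applies it indirectly by querying for the base64-encoded PEM.)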
diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index d4d2d894..bbaff638 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -8,7 +8,8 @@ mod validator_database_tests { #[test] /// Test updating the fee recipient address fn test_update_fee_recipient() { - let mut fixture = TestFixture::new(Some(1)); + let mut fixture = TestFixture::new(); + println!("{:#?}", fixture.db); let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; let updated_fee_recipient = Address::random(); @@ -37,7 +38,7 @@ mod validator_database_tests { #[test] /// Test setting the validator index fn test_set_validator_index() { - let mut fixture = TestFixture::new(Some(1)); + let mut fixture = TestFixture::new(); let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; let updated_validator_index = ValidatorIndex(10); From daed0c184414553398f57f32ea62e9989c2c8c7f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 12 Dec 2024 17:27:42 +0000 Subject: [PATCH 20/50] validator metadata insertion --- anchor/database/src/cluster_operations.rs | 4 ++- anchor/database/src/lib.rs | 2 +- anchor/database/src/state.rs | 1 - anchor/database/src/table_schema.sql | 2 +- anchor/database/src/tests/utils.rs | 2 +- anchor/database/src/tests/validator_tests.rs | 33 -------------------- anchor/database/src/validator_operations.rs | 29 +----------------- 7 files changed, 7 insertions(+), 66 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 0b11cde0..f6e85da7 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -16,7 +16,9 @@ impl NetworkDatabase { .execute(params![ cluster.validator_metadata.validator_pubkey.to_string(), *cluster.cluster_id, - cluster.validator_metadata.owner.to_string() + cluster.validator_metadata.owner.to_string(), + cluster.validator_metadata.owner.to_string(), + *cluster.validator_metadata.validator_index, ])?; // Insert all of the members and their shares diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 0b5a9b62..d890ac3a 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -186,7 +186,7 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) VALUES (?1, ?2, ?3, ?4, ?5)"); m.insert( SqlStatement::InsertValidator, - "INSERT INTO validators (validator_pubkey, cluster_id, owner) VALUES (?1, ?2, ?3)", + "INSERT INTO validators (validator_pubkey, cluster_id, fee_recipient, owner, validator_index) VALUES (?1, ?2, ?3, ?4, ?5)", ); m.insert( SqlStatement::UpdateFeeRecipient, diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 783ecec9..ea3780f2 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -26,7 +26,6 @@ impl NetworkState { operator_id } else { // If it does not exist, just default the state - println!("does not exist"); return Ok(Self::default()); }; diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index fe6e9317..5729af9a 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -22,7 +22,7 @@ CREATE TABLE cluster_members ( CREATE TABLE validators ( validator_pubkey TEXT PRIMARY KEY, cluster_id INTEGER NOT NULL, - fee_recipient TEXT DEFAULT '0x0000000000000000000000000000000000000000', + fee_recipient TEXT NOT NULL,
owner TEXT, graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', validator_index INTEGER DEFAULT 0, diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index df806e62..66fae86a 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -169,7 +169,7 @@ pub mod generators { pub fn random_metadata() -> ValidatorMetadata { ValidatorMetadata { - validator_index: ValidatorIndex(rand::thread_rng().gen()), + validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), validator_pubkey: pubkey::random(), fee_recipient: Address::random(), graffiti: Graffiti::default(), diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index bbaff638..7f17f97b 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -34,37 +34,4 @@ mod validator_database_tests { .3 ); } - - #[test] - /// Test setting the validator index - fn test_set_validator_index() { - let mut fixture = TestFixture::new(); - - let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; - let updated_validator_index = ValidatorIndex(10); - let cluster_id = fixture.cluster.cluster_id; - fixture - .db - .set_validator_index( - cluster_id, - validator_pubkey.clone(), - updated_validator_index, - ) - .expect("Failed to update validator index"); - - // make sure the state store has changed, then check the db - assert_eq!( - updated_validator_index, - fixture - .db - .get_validator_index(&cluster_id) - .expect("Failed to get validator index") - ); - assert_eq!( - *updated_validator_index as i64, - queries::get_validator(&fixture.db, &(validator_pubkey.to_string())) - .expect("Failed to fetch Validator") - .4 - ); - } }
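(A reconstruction of how the reshaped InsertValidator statement gets its five values, per the cluster_operations change earlier in this patch. This assumes a rusqlite Connection and an ssv_types::Cluster are in scope; note the owner address is bound twice, because a freshly inserted validator's fee_recipient starts out equal to its owner until a later update:

    use rusqlite::{params, Connection};
    use ssv_types::Cluster;

    fn insert_validator_row(conn: &Connection, cluster: &Cluster) -> rusqlite::Result<usize> {
        let m = &cluster.validator_metadata;
        conn.execute(
            "INSERT INTO validators (validator_pubkey, cluster_id, fee_recipient, owner, validator_index) \
             VALUES (?1, ?2, ?3, ?4, ?5)",
            params![
                m.validator_pubkey.to_string(), // ?1
                *cluster.cluster_id,            // ?2
                m.owner.to_string(),            // ?3: fee_recipient seeded from owner
                m.owner.to_string(),            // ?4: owner
                *m.validator_index,             // ?5
            ],
        )
    })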
diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 33206eb7..3b6f0fd7 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -1,6 +1,6 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::params; -use ssv_types::{ClusterId, ValidatorIndex}; +use ssv_types::ClusterId; use types::{Address, PublicKey}; /// Implements all validator related db functionality @@ -34,31 +34,4 @@ impl NetworkDatabase { metadata.fee_recipient = fee_recipient; Ok(()) } - - /// Set the index of the validator - pub fn set_validator_index( - &mut self, - cluster_id: ClusterId, - validator_pubkey: PublicKey, - index: ValidatorIndex, - ) -> Result<(), DatabaseError> { - // Make sure we are part of the cluster for this validator - if !self.state.clusters.contains(&cluster_id) { - return Err(DatabaseError::NotFound(format!( - "Validator for Cluster {} not in database", - *cluster_id - ))); - } - - let conn = self.connection()?; - conn.prepare_cached(SQL[&SqlStatement::SetValidatorIndex])? - .execute(params![*index, validator_pubkey.to_string()])?; - let metadata = self - .state - .validator_metadata - .get_mut(&cluster_id) - .expect("Cluster should exist"); - metadata.validator_index = index; - Ok(()) - } } From 05a4c24fdc201bdfcc7ec9dff2f1892db1e0946f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 12 Dec 2024 20:35:13 +0000 Subject: [PATCH 21/50] more tests and general functionality --- .../common/ssv_types/src/sql_conversions.rs | 2 - anchor/database/src/cluster_operations.rs | 8 ++ anchor/database/src/lib.rs | 15 ++++ anchor/database/src/table_schema.sql | 4 + anchor/database/src/tests/cluster_tests.rs | 74 +++++++++++++++++++ anchor/database/src/tests/utils.rs | 5 ++ anchor/database/src/validator_operations.rs | 35 ++++++++- 7 files changed, 140 insertions(+), 3 deletions(-) diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs index 7ac881eb..96522cf3 100644 --- a/anchor/common/ssv_types/src/sql_conversions.rs +++ b/anchor/common/ssv_types/src/sql_conversions.rs @@ -103,8 +103,6 @@ impl TryFrom<&Row<'_>> for ValidatorMetadata { let owner_str = row.get::<_, String>(4)?; let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(7, Type::Text, e))?; - // The rest of the field may not be populated upon first insert so the may be defaulted - // Get and parse fee_recipient from column 4 let fee_recipient_str = row.get::<_, String>(4)?; let fee_recipient = diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index f6e85da7..51ee4983 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -6,6 +6,14 @@ use ssv_types::{Cluster, ClusterId}; impl NetworkDatabase { /// Inserts a new cluster into the database pub fn insert_cluster(&mut self, cluster: Cluster) -> Result<(), DatabaseError> { + // Make sure this cluster does not exist + if self.state.clusters.contains(&cluster.cluster_id) { + return Err(DatabaseError::AlreadyPresent(format!( + "Cluster with id {} already in database", + *cluster.cluster_id + ))); + } + let mut conn = self.connection()?; let tx = conn.transaction()?; diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index d890ac3a..7b3130a5 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,6 +1,7 @@ pub use crate::error::DatabaseError; use openssl::{pkey::Public, rsa::Rsa}; use r2d2_sqlite::SqliteConnectionManager; +use rusqlite::params; use ssv_types::{ClusterId, Operator, OperatorId, Share, ValidatorMetadata}; use std::collections::{HashMap, HashSet}; use std::fs::File; use std::path::Path; @@ -65,6 +66,14 @@ impl NetworkDatabase { }) } + /// Update the last processed block number in the database + pub fn processed_block(&mut self, number: u64) -> Result<(), DatabaseError> { + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::UpdateBlockNumber])? + .execute(params![number])?; + Ok(()) + } + // Open an existing database at the given `path`, or create one if none exists.
fn open_or_create(path: &Path) -> Result<Pool, DatabaseError> { if path.exists() { @@ -129,6 +138,8 @@ pub(crate) enum SqlStatement { UpdateFeeRecipient, SetGraffiti, SetValidatorIndex, + + UpdateBlockNumber, } pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock::new(|| { @@ -200,5 +211,9 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: SqlStatement::SetValidatorIndex, "UPDATE validators SET validator_index = ?1 WHERE validator_pubkey = ?2", ); + m.insert( + SqlStatement::UpdateBlockNumber, + "UPDATE block SET block_number = 1?", + ); m }); diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 5729af9a..3108a1e7 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -1,3 +1,7 @@ +CREATE TABLE block ( + block_number INTEGER DEFAULT 0 +); + CREATE TABLE operators ( operator_id INTEGER PRIMARY KEY, public_key TEXT NOT NULL, diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 1ea5312c..bcbb0fd6 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -32,4 +32,78 @@ mod cluster_database_tests { .expect("Failed to delete cluster"); assertions::assert_cluster_exists_not_fully(&fixture.db, &fixture.cluster); } + + #[test] + // Test updating the operational status of the cluster + fn test_update_cluster_status() { + let mut fixture = TestFixture::new(); + let cluster_id = fixture.cluster.cluster_id; + + // Test updating to liquidated + fixture + .db + .update_status(cluster_id, true) + .expect("Failed to update cluster status"); + + // Verify both in memory and database + let (_, _, liquidated) = + queries::get_cluster(&fixture.db, cluster_id).expect("Cluster not found"); + assert!(liquidated, "Cluster should be liquidated"); + } + + #[test] + // Test inserting two clusters that an operator is a member of + fn test_insert_two_clusters() { + let mut fixture = TestFixture::new_empty(); + let us_pubkey = fixture.pubkey; + let us_operator = generators::operator::with_pubkey(us_pubkey); + + // generate a few more operators then add us into the group + let mut operators: Vec<Operator> = (0..3).map(generators::operator::with_id).collect(); + operators.push(us_operator); + + // inset all of teh operators + for op in &operators { + fixture + .db + .insert_operator(op) + .expect("Failed to insert operator"); + } + + // generate and insert 2 clusters + let cluster1 = generators::cluster::with_operators(&operators); + let cluster2 = generators::cluster::with_operators(&operators); + for c in [cluster1.clone(), cluster2.clone()] { + fixture + .db + .insert_cluster(c) + .expect("Failed to insert cluster"); + } + + // make sure they are in the db and state store is expected + assertions::assert_cluster_exists_fully(&fixture.db, &cluster1); + assertions::assert_cluster_exists_fully(&fixture.db, &cluster2); + } + + #[test] + // Test deleting a cluster that does not exist + fn test_delete_dne_cluster() { + let mut fixture = TestFixture::new(); + let dne_id = ClusterId(*fixture.cluster.cluster_id - 1); + + fixture + .db + .delete_cluster(dne_id) + .expect_err("Expected failure when deleting cluster that does not exist"); + } + + #[test] + // Test inserting a cluster that already exists + fn test_duplicate_cluster_insert() { + let mut fixture = TestFixture::new(); + fixture + .db + .insert_cluster(fixture.cluster) + .expect_err("Expected failure when inserting cluster that already exists"); + } }
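(Taken together, these tests now exercise the full cluster lifecycle. A rough usage sketch under stated assumptions — the db and cluster come from something like TestFixture, and update_status is assumed to exist on NetworkDatabase since the new test calls it:

    fn cluster_lifecycle(db: &mut NetworkDatabase, cluster: Cluster) -> Result<(), DatabaseError> {
        db.insert_cluster(cluster.clone())?; // first insert succeeds
        // a second insert with the same ClusterId is rejected by the new guard
        assert!(db.insert_cluster(cluster.clone()).is_err());
        db.update_status(cluster.cluster_id, true)?; // mark the cluster liquidated
        db.delete_cluster(cluster.cluster_id)?; // also removes members, shares, and validator rows
        Ok(())
    })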
diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 66fae86a..57a936cc 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -83,6 +83,11 @@ pub mod generators { pub mod operator { use super::*; + pub fn with_pubkey(pubkey: Rsa<Public>) -> Operator { + let id = OperatorId(rand::thread_rng().gen::<u32>().into()); + Operator::new_with_pubkey(pubkey, id, Address::random()) + } + pub fn with_id(id: u64) -> Operator { let public_key = generators::pubkey::random_rsa(); Operator::new_with_pubkey(public_key, OperatorId(id), Address::random()) diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 3b6f0fd7..2c905d53 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -1,7 +1,7 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::params; use ssv_types::ClusterId; -use types::{Address, PublicKey}; +use types::{Address, Graffiti, PublicKey}; /// Implements all validator related db functionality impl NetworkDatabase { @@ -34,4 +34,37 @@ impl NetworkDatabase { metadata.fee_recipient = fee_recipient; Ok(()) } + + /// Update the graffiti for a validator + pub fn update_graffiti( + &mut self, + cluster_id: ClusterId, + validator_pubkey: PublicKey, + graffiti: Graffiti, + ) -> Result<(), DatabaseError> { + if !self.state.clusters.contains(&cluster_id) { + return Err(DatabaseError::NotFound(format!( + "Validator for Cluster {} not in database", + *cluster_id + ))); + } + + // Update the database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::SetGraffiti])? + .execute(params![ + graffiti.0.as_slice(), // Convert [u8; 32] to &[u8] + validator_pubkey.to_string() + ])?; + + // Update the in-memory state + let metadata = self + .state + .validator_metadata + .get_mut(&cluster_id) + .expect("Cluster should exist since we checked above"); + metadata.graffiti = graffiti; + + Ok(()) + } } From 32c179a1c84e8cd40eded57a2dbc1b13cf473e8e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 12 Dec 2024 21:48:17 +0000 Subject: [PATCH 22/50] fix and test block processing --- anchor/database/src/lib.rs | 14 +++++++-- anchor/database/src/state.rs | 11 +++++++ anchor/database/src/table_schema.sql | 3 +- anchor/database/src/tests/state_tests.rs | 30 +++++++++++++++++++- anchor/database/src/tests/utils.rs | 1 - anchor/database/src/tests/validator_tests.rs | 1 - 6 files changed, 53 insertions(+), 7 deletions(-) diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 7b3130a5..f68e9a92 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -40,6 +40,8 @@ struct NetworkState { validator_metadata: HashMap<ClusterId, ValidatorMetadata>, /// Full set of members for a cluster we are in cluster_members: HashMap<ClusterId, HashSet<OperatorId>>, + /// The last block that was processed + last_processed_block: u64, } /// Top level NetworkDatabase that contains in memory storage for quick access @@ -67,10 +69,11 @@ impl NetworkDatabase { } /// Update the last processed block number in the database - pub fn processed_block(&mut self, number: u64) -> Result<(), DatabaseError> { + pub fn processed_block(&mut self, block_number: u64) -> Result<(), DatabaseError> { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateBlockNumber])? - .execute(params![number])?; + .execute(params![block_number])?; + self.state.last_processed_block = block_number; Ok(()) }
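(The point of persisting this counter is resumability. A hedged sketch of the intended usage — the event-application step is elided as a comment, `head` is a hypothetical chain-head argument, and crate-internal access to `db.state` is assumed, which is how the tests read the field:

    fn resume_sync(db: &mut NetworkDatabase, head: u64) -> Result<(), DatabaseError> {
        // NetworkState reloads last_processed_block on startup, so after a
        // restart we resume one block past where we stopped
        let start = db.state.last_processed_block + 1;
        for block in start..=head {
            // ... decode and apply this block's SSV contract events here ...
            db.processed_block(block)?; // persists the cursor and updates memory
        }
        Ok(())
    })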
- .execute(params![number])?; + .execute(params![block_number])?; + self.state.last_processed_block = block_number; Ok(()) } @@ -140,6 +143,7 @@ pub(crate) enum SqlStatement { SetValidatorIndex, UpdateBlockNumber, + GetBlockNumber, } pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock::new(|| { @@ -213,7 +217,11 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock: ); m.insert( SqlStatement::UpdateBlockNumber, - "UPDATE block SET block_number = 1?", + "UPDATE block SET block_number = ?1", ); + m.insert( + SqlStatement::GetBlockNumber, + "SELECT block_number FROM block", ); m }); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index ea3780f2..f69749ed 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -66,6 +66,9 @@ impl NetworkState { } }); + // Finally, get the last processed block from the database + let last_processed_block = Self::get_last_processed_block(&conn)?; + // Return fully constructed state Ok(Self { id: Some(id), @@ -74,9 +77,17 @@ impl NetworkState { shares, validator_metadata, cluster_members, + last_processed_block, }) } + // Get the last block that was processed and saved to db + fn get_last_processed_block(conn: &PoolConn) -> Result<u64, DatabaseError> { + conn.prepare_cached(SQL[&SqlStatement::GetBlockNumber])? + .query_row(params![], |row| row.get(0)) + .map_err(DatabaseError::from) + } + // Check to see if an operator with the public key already exists in the database fn does_self_exist( conn: &PoolConn, diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 3108a1e7..589943e2 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -1,6 +1,7 @@ CREATE TABLE block ( - block_number INTEGER DEFAULT 0 + block_number INTEGER NOT NULL DEFAULT 0 CHECK (block_number >= 0) ); +INSERT INTO block (block_number) VALUES (0); CREATE TABLE operators ( operator_id INTEGER PRIMARY KEY, diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 4f307d93..eadbc022 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -5,7 +5,7 @@ mod state_database_tests { use super::*; #[test] - // Make sure all of the previously inserted operators are present after restart + // Test that the previously inserted operators are present after restart fn test_operator_store() { // Create new test fixture with populated DB let mut fixture = TestFixture::new(); @@ -22,6 +22,7 @@ mod state_database_tests { } #[test] + // Test that the proper cluster data is present after restart fn test_cluster_after_restart() { // Create new test fixture with populated DB let mut fixture = TestFixture::new(); @@ -35,4 +36,31 @@ mod state_database_tests { // Confirm all cluster related data is still correct assertions::assert_cluster_exists_fully(&fixture.db, &cluster); } + + #[test] + // Test that you can update and retrieve a block number + fn test_block_number() { + let mut fixture = TestFixture::new(); + assert_eq!(fixture.db.state.last_processed_block, 0); + fixture + .db + .processed_block(10) + .expect("Failed to update the block number"); + assert_eq!(fixture.db.state.last_processed_block, 10); + } + + #[test] + // Test to make sure the block number is loaded in after restart + fn test_block_number_after_restart() { + let mut fixture = TestFixture::new(); + fixture + .db + .processed_block(10) + .expect("Failed to update the block number"); + drop(fixture.db); + + fixture.db = NetworkDatabase::new(&fixture.path, 
&fixture.pubkey) + .expect("Failed to create database"); + assert_eq!(fixture.db.state.last_processed_block, 10); + } } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 57a936cc..c5abae16 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -46,7 +46,6 @@ impl TestFixture { let cluster = generators::cluster::with_operators(&operators); db.insert_cluster(cluster.clone()) .expect("Failed to insert cluster"); - println!("{:?}", db); Self { db, diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index 7f17f97b..a133ef34 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -9,7 +9,6 @@ mod validator_database_tests { /// Test updating the fee recipient address fn test_update_fee_recipient() { let mut fixture = TestFixture::new(); - println!("{:#?}", fixture.db); let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; let updated_fee_recipient = Address::random(); From 05a4c24fdc201bdfcc7ec9dff2f1892db1e0946f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 13 Dec 2024 13:55:24 +0000 Subject: [PATCH 23/50] additional tests & bugfix on validator generation --- anchor/database/src/tests/cluster_tests.rs | 3 + anchor/database/src/tests/mod.rs | 1 + anchor/database/src/tests/state_tests.rs | 8 +- anchor/database/src/tests/utils.rs | 119 ++++++++++++++++++- anchor/database/src/tests/validator_tests.rs | 82 ++++++++++--- 5 files changed, 191 insertions(+), 22 deletions(-) diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index bcbb0fd6..e21a0e73 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -31,6 +31,7 @@ mod cluster_database_tests { .delete_cluster(fixture.cluster.cluster_id) .expect("Failed to delete cluster"); assertions::assert_cluster_exists_not_fully(&fixture.db, &fixture.cluster); + assertions::assert_cluster_exists_not_in_store(&fixture.db, &fixture.cluster); } #[test] @@ -83,6 +84,8 @@ mod cluster_database_tests { // make sure they are in the db and state store is expected assertions::assert_cluster_exists_fully(&fixture.db, &cluster1); assertions::assert_cluster_exists_fully(&fixture.db, &cluster2); + assertions::assert_cluster_exists_in_store(&fixture.db, &cluster1); + assertions::assert_cluster_exists_in_store(&fixture.db, &cluster2); } #[test] diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index a5db2e43..262bfa39 100644 --- a/anchor/database/src/tests/mod.rs +++ b/anchor/database/src/tests/mod.rs @@ -9,6 +9,7 @@ pub mod test_prelude { pub use crate::NetworkDatabase; pub use ssv_types::*; pub use tempfile::tempdir; + pub use types::{Address, Graffiti, PublicKey}; } #[cfg(test)] diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index eadbc022..e5ab9dca 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -15,10 +15,11 @@ mod state_database_tests { fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) .expect("Failed to create database"); - // confirm that all of the operators exist were - for operator in fixture.operators { - assertions::assert_operator_exists_fully(&fixture.db, &operator); + // confirm that all of the operators exist + for operator in &fixture.operators { + 
assertions::assert_operator_exists_fully(&fixture.db, operator); } + assertions::assert_operators_exists_in_store(&fixture.db, &fixture.operators); } #[test] + // Test that the proper cluster data is present after restart fn test_cluster_after_restart() { // Create new test fixture with populated DB let mut fixture = TestFixture::new(); @@ -36,6 +36,7 @@ mod state_database_tests { // Confirm all cluster related data is still correct assertions::assert_cluster_exists_fully(&fixture.db, &cluster); + assertions::assert_cluster_exists_in_store(&fixture.db, &cluster); } #[test] diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index c5abae16..f4d24e34 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -6,7 +6,6 @@ use rusqlite::{params, OptionalExtension}; use std::path::PathBuf; use tempfile::TempDir; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Address, Graffiti, PublicKey}; const DEFAULT_NUM_OPERATORS: u64 = 4; const RSA_KEY_SIZE: u32 = 2048; @@ -172,12 +171,15 @@ pub mod generators { use super::*; pub fn random_metadata() -> ValidatorMetadata { + // When a validator is added to the network, fee_recipient = owner. This must be updated + // with another tx + let recipient_owner = Address::random(); ValidatorMetadata { validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), validator_pubkey: pubkey::random(), - fee_recipient: Address::random(), + fee_recipient: recipient_owner, graffiti: Graffiti::default(), - owner: Address::random(), + owner: recipient_owner, } } } @@ -263,6 +265,7 @@ pub mod queries { /// Database assertions for testing pub mod assertions { + use super::*; pub fn assert_operator_exists_fully(db: &NetworkDatabase, operator: &Operator) { // Check in-memory state let fetched = db @@ -315,6 +318,116 @@ pub mod assertions { ); } + pub fn assert_operators_exists_in_store(db: &NetworkDatabase, operators: &Vec<Operator>) { + // Verify each operator exists in the in-memory state + for operator in operators { + // Check operator exists in memory state + let stored_operator = db + .state + .operators + .get(&operator.id) + .expect("Operator should exist in memory state"); + + // Verify all fields match + assert_eq!(stored_operator.id, operator.id, "Operator ID mismatch"); + assert_eq!( + stored_operator.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch" + ); + assert_eq!( + stored_operator.owner, operator.owner, + "Operator owner mismatch" + ); + } + } + + // Verifies that the cluster does not exist in the state store + pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) { + let cluster_id = cluster.cluster_id; + assert!(!db.state.clusters.contains(&cluster_id)); + assert!(!db.state.shares.contains_key(&cluster_id)); + assert!(!db.state.validator_metadata.contains_key(&cluster_id)); + assert!(!db.state.cluster_members.contains_key(&cluster_id)); + assert!(!db.state.cluster_members.contains_key(&cluster_id)); + } + + // Verifies that the cluster exists correctly in the state store + pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) { + // Verify cluster is recorded in memory state + assert!( + db.state.clusters.contains(&cluster.cluster_id), + "Cluster ID not found in memory state" + ); + + // Verify all operators exist and are cluster members + let operator_ids: Vec<OperatorId> = cluster + .cluster_members + .iter() + .map(|c| c.operator_id) + .collect(); + + for id in operator_ids { + // Check operator exists + assert!( + db.operator_exists(&id), + "Operator {} not found in database", + *id + ); + + // Check operator is recorded as cluster member + assert!( + 
db.state.cluster_members[&cluster.cluster_id].contains(&id), + "Operator {} not recorded as cluster member in memory state", + *id + ); + } + + // Verify validator metadata matches + let validator_metadata = db.state.validator_metadata[&cluster.cluster_id].clone(); + assert_eq!( + validator_metadata.owner, cluster.validator_metadata.owner, + "Validator owner mismatch" + ); + assert_eq!( + validator_metadata.validator_index, cluster.validator_metadata.validator_index, + "Validator index mismatch" + ); + assert_eq!( + validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, + "Fee recipient mismatch" + ); + assert_eq!( + validator_metadata.graffiti, cluster.validator_metadata.graffiti, + "Graffiti mismatch" + ); + + // Verify share exists for this cluster + assert!( + db.state.shares.contains_key(&cluster.cluster_id), + "No share found for cluster" + ); + + // Verify share data matches if we're a member + if let Some(our_id) = db.state.id { + if let Some(our_member) = cluster + .cluster_members + .iter() + .find(|m| m.operator_id == our_id) + { + let stored_share = db.state.shares[&cluster.cluster_id].clone(); + assert_eq!( + stored_share.share_pubkey, our_member.share.share_pubkey, + "Share public key mismatch" + ); + assert_eq!( + stored_share.encrypted_private_key, our_member.share.encrypted_private_key, + "Encrypted private key mismatch" + ); + } + } + } + /// Verifies that a cluster exists and all its data is correctly stored pub fn assert_cluster_exists_fully(db: &NetworkDatabase, cluster: &Cluster) { // Check cluster base data diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index a133ef34..6a1df146 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -3,34 +3,84 @@ use super::test_prelude::*; #[cfg(test)] mod validator_database_tests { use super::*; - use types::Address; #[test] - /// Test updating the fee recipient address + // Test updating the fee recipient fn test_update_fee_recipient() { let mut fixture = TestFixture::new(); + let cluster = &fixture.cluster; + let new_address = Address::random(); - let validator_pubkey = fixture.cluster.validator_metadata.validator_pubkey; - let updated_fee_recipient = Address::random(); - let cluster_id = fixture.cluster.cluster_id; + // Update fee recipient fixture .db - .update_fee_recipient(cluster_id, validator_pubkey.clone(), updated_fee_recipient) + .update_fee_recipient( + cluster.cluster_id, + cluster.validator_metadata.validator_pubkey.clone(), + new_address, + ) .expect("Failed to update fee recipient"); - // make sure the state store has changed, then check the db + // Verify update in memory state + let metadata = &fixture.db.state.validator_metadata[&cluster.cluster_id]; assert_eq!( - updated_fee_recipient, - fixture - .db - .get_fee_recipient(&cluster_id) - .expect("Failed to get fee recipient") + metadata.fee_recipient, new_address, + "Fee recipient not updated in memory" ); + + // Verify update in database + let validator = queries::get_validator( + &fixture.db, + &cluster.validator_metadata.validator_pubkey.to_string(), + ) + .expect("Validator not found in database"); assert_eq!( - updated_fee_recipient.to_string(), - queries::get_validator(&fixture.db, &(validator_pubkey.to_string())) - .expect("Failed to fetch Validator") - .3 + validator.3, + new_address.to_string(), + "Fee recipient not updated in database" + ); + } + + #[test] + /// Test updating the graffiti of a validator + fn 
test_update_graffiti() {
+        let mut fixture = TestFixture::new();
+        let cluster = &fixture.cluster;
+        let new_graffiti = Graffiti::default(); // Or create a specific test graffiti
+
+        // Update graffiti
+        fixture
+            .db
+            .update_graffiti(
+                cluster.cluster_id,
+                cluster.validator_metadata.validator_pubkey.clone(),
+                new_graffiti,
+            )
+            .expect("Failed to update graffiti");
+
+        // Verify update in memory state
+        let metadata = &fixture.db.state.validator_metadata[&cluster.cluster_id];
+        assert_eq!(
+            metadata.graffiti, new_graffiti,
+            "Graffiti not updated in memory"
+        );
+    }
+
+    #[test]
+    /// Test updating the fee recipient of a validator that does not exist
+    fn test_update_validator_nonexistent_cluster() {
+        let mut fixture = TestFixture::new();
+        let nonexistent_cluster_id = ClusterId(*fixture.cluster.cluster_id + 1);
+
+        let result = fixture.db.update_fee_recipient(
+            nonexistent_cluster_id,
+            fixture.cluster.validator_metadata.validator_pubkey.clone(),
+            Address::random(),
+        );
+
+        assert!(
+            result.is_err(),
+            "Should fail when updating non-existent cluster"
+        );
+    }
 }
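The `is_err` assertion above could be sharpened into a variant check. A minimal sketch, assuming the update path keeps returning the `DatabaseError::NotFound` variant used elsewhere in the operations modules and that `DatabaseError` derives `Debug`:

    let err = result.expect_err("Should fail when updating non-existent cluster");
    assert!(
        matches!(err, DatabaseError::NotFound(_)),
        "Expected NotFound, got {err:?}"
    );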
From f8999b6f50aa91881561e081c43ee011c4f4d007 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Fri, 13 Dec 2024 14:55:48 +0000
Subject: [PATCH 24/50] break up assertions, basic comments

---
 anchor/database/src/tests/cluster_tests.rs  |  11 +-
 anchor/database/src/tests/operator_tests.rs |   9 +-
 anchor/database/src/tests/state_tests.rs    |   6 +-
 anchor/database/src/tests/utils.rs          | 242 +++++++++-----------
 4 files changed, 122 insertions(+), 146 deletions(-)

diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs
index e21a0e73..e4267f21 100644
--- a/anchor/database/src/tests/cluster_tests.rs
+++ b/anchor/database/src/tests/cluster_tests.rs
@@ -8,7 +8,8 @@ mod cluster_database_tests {
     // Test inserting a cluster into the database
     fn test_insert_retrieve_cluster() {
         let fixture = TestFixture::new();
-        assertions::assert_cluster_exists_fully(&fixture.db, &fixture.cluster);
+        assertions::assert_cluster_exists_in_db(&fixture.db, &fixture.cluster);
+        assertions::assert_cluster_exists_in_store(&fixture.db, &fixture.cluster);
     }
 
     #[test]
@@ -30,7 +31,7 @@ mod cluster_database_tests {
         .db
         .delete_cluster(fixture.cluster.cluster_id)
         .expect("Failed to delete cluster");
-        assertions::assert_cluster_exists_not_fully(&fixture.db, &fixture.cluster);
+        assertions::assert_cluster_exists_not_in_db(&fixture.db, &fixture.cluster);
         assertions::assert_cluster_exists_not_in_store(&fixture.db, &fixture.cluster);
     }
 
@@ -63,7 +64,7 @@ mod cluster_database_tests {
     let mut operators: Vec<Operator> = (0..3).map(generators::operator::with_id).collect();
     operators.push(us_operator);
 
-    // inset all of teh operators
+    // insert all of the operators
     for op in &operators {
         fixture
             .db
@@ -82,8 +83,8 @@ mod cluster_database_tests {
     }
 
     // make sure they are in the db and state store is expected
-    assertions::assert_cluster_exists_fully(&fixture.db, &cluster1);
-    assertions::assert_cluster_exists_fully(&fixture.db, &cluster2);
+    assertions::assert_cluster_exists_in_db(&fixture.db, &cluster1);
+    assertions::assert_cluster_exists_in_db(&fixture.db, &cluster2);
     assertions::assert_cluster_exists_in_store(&fixture.db, &cluster1);
     assertions::assert_cluster_exists_in_store(&fixture.db, &cluster2);
 }

diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs
index 101c69b6..5b27659d 100644
--- a/anchor/database/src/tests/operator_tests.rs
+++ b/anchor/database/src/tests/operator_tests.rs
@@ -19,7 +19,8 @@ mod operator_database_tests {
         .expect("Failed to insert operator");
 
         // Confirm that it exists both in the db and the state store
-        assertions::assert_operator_exists_fully(&fixture.db, &operator);
+        assertions::assert_operator_exists_in_db(&fixture.db, &operator);
+        assertions::assert_operator_exists_in_store(&fixture.db, &operator);
     }
 
     #[test]
@@ -62,7 +63,8 @@ mod operator_database_tests {
         .expect("Failed to delete operator");
 
         // Confirm that it is gone
-        assertions::assert_operator_not_exists_fully(&fixture.db, operator.id);
+        assertions::assert_operator_not_exists_in_db(&fixture.db, operator.id);
+        assertions::assert_operator_not_exists_in_store(&fixture.db, operator.id);
     }
 
     #[test]
@@ -86,7 +88,8 @@ mod operator_database_tests {
         .db
         .delete_operator(operator.id)
         .expect("Failed to delete operator");
-        assertions::assert_operator_not_exists_fully(&fixture.db, operator.id);
+        assertions::assert_operator_not_exists_in_db(&fixture.db, operator.id);
+        assertions::assert_operator_not_exists_in_store(&fixture.db, operator.id);
     }
 }

diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs
index e5ab9dca..eef1c48f 100644
--- a/anchor/database/src/tests/state_tests.rs
+++ b/anchor/database/src/tests/state_tests.rs
@@ -17,9 +17,9 @@ mod state_database_tests {
 
         // confirm that all of the operators exist
         for operator in &fixture.operators {
-            assertions::assert_operator_exists_fully(&fixture.db, operator);
+            assertions::assert_operator_exists_in_db(&fixture.db, operator);
+            assertions::assert_operator_exists_in_store(&fixture.db, operator);
         }
-        assertions::assert_operators_exists_in_store(&fixture.db, &fixture.operators);
     }
 
     #[test]
@@ -35,7 +35,7 @@ mod state_database_tests {
         .expect("Failed to create database");
 
         // Confirm all cluster related data is still correct
-        assertions::assert_cluster_exists_fully(&fixture.db, &cluster);
+        assertions::assert_cluster_exists_in_db(&fixture.db, &cluster);
         assertions::assert_cluster_exists_in_store(&fixture.db, &cluster);
     }
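Taken together, the fixture and the generators in utils.rs give every test a self-contained database. A typical test then reads like the sketch below (the test name and the id 7 are illustrative; all APIs are the ones defined in this patch, and the reopen at the end is the persistence round trip the state tests rely on):

    #[test]
    fn operator_round_trip() {
        let mut fixture = TestFixture::new_empty();
        let operator = generators::operator::with_id(7);

        fixture
            .db
            .insert_operator(&operator)
            .expect("Failed to insert operator");

        // Every mutation is checked against both stores.
        assertions::assert_operator_exists_in_db(&fixture.db, &operator);
        assertions::assert_operator_exists_in_store(&fixture.db, &operator);

        // Reopening the same file must rebuild the in-memory state from SQLite.
        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
            .expect("Failed to reopen database");
        assert!(fixture.db.operator_exists(&operator.id));
    }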
diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs
index f4d24e34..b890cb10 100644
--- a/anchor/database/src/tests/utils.rs
+++ b/anchor/database/src/tests/utils.rs
@@ -23,8 +23,10 @@ pub struct TestFixture {
 }
 
 impl TestFixture {
+    // Generate a database that is populated with a full cluster. We are a member of the cluster so
+    // the in-memory state store will also be populated
     pub fn new() -> Self {
-        // Generate the operators first so we can pick one to be us
+        // generate the operators and pick the first one to be us
         let operators: Vec<Operator> = (0..DEFAULT_NUM_OPERATORS)
             .map(generators::operator::with_id)
             .collect();
@@ -37,7 +39,6 @@ impl TestFixture {
         let temp_dir = TempDir::new().expect("Failed to create temporary directory");
         let db_path = temp_dir.path().join("test.db");
         let mut db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB");
-
         operators.iter().for_each(|op| {
             db.insert_operator(op).expect("Failed to insert operator");
         });
@@ -56,6 +57,7 @@ impl TestFixture {
         }
     }
 
+    // Generate an empty database and pick a random public key to be us
     pub fn new_empty() -> Self {
         let temp_dir = TempDir::new().expect("Failed to create temporary directory");
         let db_path = temp_dir.path().join("test.db");
@@ -78,6 +80,7 @@ impl TestFixture {
 pub mod generators {
     use super::*;
 
+    // Generate a random operator. Either with a specific id or a specific public key
    pub mod operator {
        use super::*;
 
@@ -95,6 +98,7 @@ pub mod generators {
    pub mod cluster {
        use super::*;
 
+        // Generate a full cluster with a configurable number of operators
        pub fn random(num_operators: u64) -> Cluster {
            let cluster_id = ClusterId(rand::thread_rng().gen::<u32>().into());
            let members = (0..num_operators)
@@ -110,6 +114,7 @@ pub mod generators {
            }
        }
 
+        // Generate a cluster with a specific set of operators
        pub fn with_operators(operators: &[Operator]) -> Cluster {
            let cluster_id = ClusterId(rand::thread_rng().gen::<u32>().into());
            let members = operators
@@ -130,6 +135,7 @@ pub mod generators {
    pub mod member {
        use super::*;
 
+        // Generate a new cluster member for a cluster and operator
        pub fn new(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember {
            ClusterMember {
                operator_id,
@@ -142,6 +148,7 @@ pub mod generators {
    pub mod share {
        use super::*;
 
+        // Generate a random keyshare
        pub fn random() -> Share {
            Share {
                share_pubkey: pubkey::random(),
@@ -153,6 +160,7 @@ pub mod generators {
    pub mod pubkey {
        use super::*;
 
+        // Generate a random RSA public key for operators
        pub fn random_rsa() -> Rsa<Public> {
            let priv_key = Rsa::generate(RSA_KEY_SIZE).expect("Failed to generate RSA key");
            priv_key
@@ -161,6 +169,7 @@ pub mod generators {
            .expect("Failed to process RSA key")
    }
 
+    // Generate a random public key for validators
    pub fn random() -> PublicKey {
        let rng = &mut XorShiftRng::from_seed(DEFAULT_SEED);
        PublicKey::random_for_test(rng)
@@ -170,9 +179,9 @@ pub mod generators {
    pub mod validator {
        use super::*;
 
+        // Generate random ValidatorMetadata
+        // assumes fee_recipient = owner.
        pub fn random_metadata() -> ValidatorMetadata {
-            // When a validator is added to the network, fee_recipient = owner. 
This must be updated - // with another tx let recipient_owner = Address::random(); ValidatorMetadata { validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), @@ -185,10 +194,12 @@ pub mod generators { } } -/// Database queries for testing +// Database queries for testing +// This will extract information corresponding to the original tables pub mod queries { use super::*; + // Get an operator from the database pub fn get_operator(db: &NetworkDatabase, id: OperatorId) -> Option { let conn = db.connection().unwrap(); let operators = conn.prepare("SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1") @@ -198,6 +209,7 @@ pub mod queries { operators } + // Get a Cluster from the database pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { let conn = db.connection().unwrap(); let cluster = conn @@ -211,6 +223,7 @@ pub mod queries { cluster } + // Get a share from the database pub fn get_shares( db: &NetworkDatabase, cluster_id: ClusterId, @@ -235,6 +248,7 @@ pub mod queries { shares } + // Get a ClusterMember from the database pub fn get_cluster_member( db: &NetworkDatabase, cluster_id: ClusterId, @@ -249,6 +263,7 @@ pub mod queries { member } + // Get ValidatorMetadata from the database pub fn get_validator( db: &NetworkDatabase, validator_pubkey: &str, @@ -268,82 +283,39 @@ pub mod assertions { use super::*; - pub fn assert_operator_exists_fully(db: &NetworkDatabase, operator: &Operator) { - // Check in-memory state - let fetched = db - .get_operator(&operator.id) - .expect("Operator not found in memory"); - - assert_eq!(fetched.id, operator.id, "Operator ID mismatch in memory"); - assert_eq!( - fetched.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap(), - "Operator public key mismatch in memory" - ); - assert_eq!( - fetched.owner, operator.owner, - "Operator owner mismatch in memory" - ); + // State Store (In-Memory) Assertions + // These assertions verify the in memory state maintained by the application - // Check database state - let db_operator = - queries::get_operator(db, operator.id).expect("Operator not found in database"); + // Verifies that the operator is in the state store + pub fn assert_operator_exists_in_store(db: &NetworkDatabase, operator: &Operator) { + // Check operator exists in memory state + let stored_operator = db + .state + .operators + .get(&operator.id) + .expect("Operator should exist in memory state"); + // Verify all fields match + assert_eq!(stored_operator.id, operator.id, "Operator ID mismatch"); assert_eq!( - db_operator.rsa_pubkey.public_key_to_pem().unwrap(), + stored_operator.rsa_pubkey.public_key_to_pem().unwrap(), operator.rsa_pubkey.public_key_to_pem().unwrap(), - "Operator public key mismatch in database" - ); - assert_eq!( - db_operator.id, operator.id, - "Operator ID mismatch in database" + "Operator public key mismatch" ); assert_eq!( - db_operator.owner, operator.owner, - "Operator owner mismatch in database" - ); - } - - pub fn assert_operator_not_exists_fully(db: &NetworkDatabase, operator_id: OperatorId) { - // Check memory - assert!( - db.get_operator(&operator_id).is_none(), - "Operator still exists in memory" - ); - - // Check database - assert!( - queries::get_operator(db, operator_id).is_none(), - "Operator still exists in database" + stored_operator.owner, operator.owner, + "Operator owner mismatch" ); } - pub fn assert_operators_exists_in_store(db: &NetworkDatabase, operators: &Vec) { - // Verify each operator exists 
in the in-memory state
-        for operator in operators {
-            // Check operator exists in memory state
-            let stored_operator = db
-                .state
-                .operators
-                .get(&operator.id)
-                .expect("Operator should exist in memory state");
-
-            // Verify all fields match
-            assert_eq!(stored_operator.id, operator.id, "Operator ID mismatch");
-            assert_eq!(
-                stored_operator.rsa_pubkey.public_key_to_pem().unwrap(),
-                operator.rsa_pubkey.public_key_to_pem().unwrap(),
-                "Operator public key mismatch"
-            );
-            assert_eq!(
-                stored_operator.owner, operator.owner,
-                "Operator owner mismatch"
-            );
-        }
+    // Verifies that the operator is not in the state store
+    pub fn assert_operator_not_exists_in_store(db: &NetworkDatabase, operator: OperatorId) {
+        assert!(!db.state.operators.contains_key(&operator));
     }
 
     // Verifies that the cluster does not exist in the state store
     pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) {
+        // Just make sure we have 0 references to the cluster_id
         let cluster_id = cluster.cluster_id;
         assert!(!db.state.clusters.contains(&cluster_id));
         assert!(!db.state.shares.contains_key(&cluster_id));
@@ -354,12 +326,7 @@ pub mod assertions {
 
     // Verifies that the cluster exists correctly in the state store
     pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) {
-        // Verify cluster is recorded in memory state
-        assert!(
-            db.state.clusters.contains(&cluster.cluster_id),
-            "Cluster ID not found in memory state"
-        );
-
+        // - operators: HashMap<OperatorId, Operator>,
         // Verify all operators exist and are cluster members
         let operator_ids: Vec<OperatorId> = cluster
             .cluster_members
@@ -375,6 +342,7 @@ pub mod assertions {
             *id
         );
 
+        // - cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
         // Check operator is recorded as cluster member
         assert!(
             db.state.cluster_members[&cluster.cluster_id].contains(&id),
@@ -383,6 +351,38 @@ pub mod assertions {
         );
     }
 
+        // - clusters: HashSet<ClusterId>,
+        // Verify cluster is recorded in memory state
+        assert!(
+            db.state.clusters.contains(&cluster.cluster_id),
+            "Cluster ID not found in memory state"
+        );
+
+        // - shares: HashMap<ClusterId, Share>,
+        // Verify share exists and share data matches if we're a member
+        if let Some(our_id) = db.state.id {
+            if let Some(our_member) = cluster
+                .cluster_members
+                .iter()
+                .find(|m| m.operator_id == our_id)
+            {
+                let stored_share = db.state.shares[&cluster.cluster_id].clone();
+                assert_eq!(
+                    stored_share.share_pubkey, our_member.share.share_pubkey,
+                    "Share public key mismatch"
+                );
+                assert_eq!(
+                    stored_share.encrypted_private_key, our_member.share.encrypted_private_key,
+                    "Encrypted private key mismatch"
+                );
+            }
+        }
+        assert!(
+            db.state.shares.contains_key(&cluster.cluster_id),
+            "No share found for cluster"
+        );
+
+        // - validator_metadata: HashMap<ClusterId, ValidatorMetadata>,
         // Verify validator metadata matches
         let validator_metadata = db.state.validator_metadata[&cluster.cluster_id].clone();
         assert_eq!(
@@ -401,35 +401,42 @@ pub mod assertions {
             validator_metadata.graffiti, cluster.validator_metadata.graffiti,
             "Graffiti mismatch"
         );
+    }
 
-        // Verify share exists for this cluster
-        assert!(
-            db.state.shares.contains_key(&cluster.cluster_id),
-            "No share found for cluster"
+    // Database (Persistent Storage) Assertions
+    // These assertions verify the persistent state in the SQLite database
+
+    // Verify that the operator is in the database
+    pub fn assert_operator_exists_in_db(db: &NetworkDatabase, operator: &Operator) {
+        let db_operator =
+            queries::get_operator(db, operator.id).expect("Operator not found in database");
+
+        assert_eq!(
+
db_operator.rsa_pubkey.public_key_to_pem().unwrap(), + operator.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch in database" + ); + assert_eq!( + db_operator.id, operator.id, + "Operator ID mismatch in database" + ); + assert_eq!( + db_operator.owner, operator.owner, + "Operator owner mismatch in database" ); + } - // Verify share data matches if we're a member - if let Some(our_id) = db.state.id { - if let Some(our_member) = cluster - .cluster_members - .iter() - .find(|m| m.operator_id == our_id) - { - let stored_share = db.state.shares[&cluster.cluster_id].clone(); - assert_eq!( - stored_share.share_pubkey, our_member.share.share_pubkey, - "Share public key mismatch" - ); - assert_eq!( - stored_share.encrypted_private_key, our_member.share.encrypted_private_key, - "Encrypted private key mismatch" - ); - } - } + // Verify that the operator does not exist in the database + pub fn assert_operator_not_exists_in_db(db: &NetworkDatabase, operator_id: OperatorId) { + // Check database + assert!( + queries::get_operator(db, operator_id).is_none(), + "Operator still exists in database" + ); } - /// Verifies that a cluster exists and all its data is correctly stored - pub fn assert_cluster_exists_fully(db: &NetworkDatabase, cluster: &Cluster) { + // Verifies that a cluster exists in the database + pub fn assert_cluster_exists_in_db(db: &NetworkDatabase, cluster: &Cluster) { // Check cluster base data let (id, faulty, liquidated) = queries::get_cluster(db, cluster.cluster_id).expect("Cluster not found in database"); @@ -444,25 +451,6 @@ pub mod assertions { "Cluster liquidated status mismatch" ); - // Verify cluster is in memory if we're a member - if let Some(our_id) = db.state.id { - if cluster - .cluster_members - .iter() - .any(|m| m.operator_id == our_id) - { - assert!( - db.state.clusters.contains(&cluster.cluster_id), - "Cluster not found in memory state" - ); - assert_eq!( - db.state.cluster_members[&cluster.cluster_id].len(), - cluster.cluster_members.len(), - "Cluster members count mismatch in memory" - ); - } - } - // Verify cluster members for member in &cluster.cluster_members { let member_exists = @@ -504,24 +492,14 @@ pub mod assertions { ); } - /// Verifies that a cluster does not exist in any form - pub fn assert_cluster_exists_not_fully(db: &NetworkDatabase, cluster: &Cluster) { + // Verifies that a cluster does not exist in the database + pub fn assert_cluster_exists_not_in_db(db: &NetworkDatabase, cluster: &Cluster) { // Verify cluster base data is gone assert!( queries::get_cluster(db, cluster.cluster_id).is_none(), "Cluster still exists in database" ); - // Verify cluster is not in memory - assert!( - !db.state.clusters.contains(&cluster.cluster_id), - "Cluster still exists in memory state" - ); - assert!( - !db.state.cluster_members.contains_key(&cluster.cluster_id), - "Cluster members still exist in memory state" - ); - // Verify all cluster members are gone for member in &cluster.cluster_members { assert!( @@ -540,11 +518,5 @@ pub mod assertions { .is_none(), "Validator still exists in database" ); - assert!( - !db.state - .validator_metadata - .contains_key(&cluster.cluster_id), - "Validator metadata still exists in memory state" - ); } } From 9e4f289fafc5ad923d8d5d72999175939b100f65 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 13 Dec 2024 20:24:49 +0000 Subject: [PATCH 25/50] migrate to immutable api with fine grained state locking --- Cargo.lock | 82 ++++----- anchor/database/Cargo.toml | 1 + anchor/database/src/cluster_operations.rs | 87 
++++------ anchor/database/src/lib.rs | 27 ++- anchor/database/src/operator_operations.rs | 47 +++--- anchor/database/src/share_operations.rs | 2 +- anchor/database/src/state.rs | 50 ++++-- anchor/database/src/tests/cluster_tests.rs | 18 +- anchor/database/src/tests/operator_tests.rs | 10 +- anchor/database/src/tests/state_tests.rs | 8 +- anchor/database/src/tests/utils.rs | 167 ++++++++++--------- anchor/database/src/tests/validator_tests.rs | 10 +- anchor/database/src/validator_operations.rs | 38 +++-- 13 files changed, 286 insertions(+), 261 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19874d6c..5ff2d50c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,7 +896,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "arbitrary", @@ -1027,9 +1027,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.3" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f657647bcff5394bf56c7317665bbf790a137a50eaaa5c6bfbb9e27a518f2d" +checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf" dependencies = [ "jobserver", "libc", @@ -1165,7 +1165,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "clap", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1246,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "quote", "syn 1.0.109", @@ -1668,6 +1668,7 @@ version = "0.1.0" dependencies = [ "base64 0.22.1", "openssl", + "parking_lot 0.12.3", "r2d2", "r2d2_sqlite", "rand", @@ -1852,7 +1853,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2316,7 +2317,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2338,7 +2339,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2406,7 +2407,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "bytes", "discv5 0.9.0", @@ -2810,7 +2811,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2986,7 +2987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", ] @@ -3156,7 +3157,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "async-channel", "asynchronous-codec", @@ -3953,7 +3954,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "bytes", ] @@ -4149,7 +4150,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "arbitrary", "c-kzg", @@ -4546,7 +4547,7 @@ dependencies = [ "quinn", "rand", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", "thiserror 1.0.69", "tokio", @@ -4618,7 +4619,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.17.8", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-webpki 0.101.7", "thiserror 1.0.69", "x509-parser", @@ -4789,7 +4790,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4847,7 +4848,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "git-version", "target_info", @@ -4926,7 +4927,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4974,7 +4975,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "fnv", ] @@ -5050,7 +5051,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5092,7 +5093,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "prometheus", ] @@ -5934,7 +5935,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6193,7 +6194,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", "socket2 0.5.8", "thiserror 2.0.6", "tokio", @@ -6211,7 +6212,7 @@ dependencies = [ "rand", "ring 0.17.8", "rustc-hash 2.1.0", - "rustls 0.23.19", + "rustls 0.23.20", "rustls-pki-types", "slab", "thiserror 2.0.6", @@ -6690,7 +6691,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.23", + "semver 1.0.24", ] [[package]] @@ -6757,9 +6758,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "ring 0.17.8", @@ -6789,9 +6790,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" dependencies = [ "web-time", ] @@ -6860,7 +6861,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" [[package]] name = "salsa20" @@ -7009,9 +7010,9 @@ dependencies = [ [[package]] name 
= "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "semver-parser" @@ -7034,7 +7035,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "serde", "url", @@ -7656,7 +7657,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7785,7 +7786,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "async-channel", "futures", @@ -7843,7 +7844,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "quote", "syn 1.0.109", @@ -8358,7 +8359,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8536,7 +8537,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#c5a48a9dffc82e5e18d24ca7f2ab3671c9ad8469" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -9334,6 +9335,7 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ + "serde", "zeroize_derive", ] diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml index e23fee8b..b73391de 100644 --- a/anchor/database/Cargo.toml +++ b/anchor/database/Cargo.toml @@ -12,6 +12,7 @@ ssv_types = { workspace = true } types = { workspace = true } base64 = { workspace = true } openssl = { workspace = true } +parking_lot = { workspace = true } [dev-dependencies] rand = "0.8.5" diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 51ee4983..1a066733 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -5,15 +5,7 @@ use ssv_types::{Cluster, ClusterId}; /// Implements all cluster related functionality on the database impl NetworkDatabase { /// Inserts a new cluster into the database - pub fn insert_cluster(&mut 
self, cluster: Cluster) -> Result<(), DatabaseError> { - // Make sure this cluster does not exists - if self.state.clusters.contains(&cluster.cluster_id) { - return Err(DatabaseError::AlreadyPresent(format!( - "Cluster with id {} already in database", - *cluster.cluster_id - ))); - } - + pub fn insert_cluster(&self, cluster: Cluster) -> Result<(), DatabaseError> { let mut conn = self.connection()?; let tx = conn.transaction()?; @@ -30,13 +22,17 @@ impl NetworkDatabase { ])?; // Insert all of the members and their shares + let mut member_in_cluster = false; + let own_id = self.read_state(|state| state.id); cluster.cluster_members.iter().try_for_each(|member| { - if let Some(id) = self.state.id { + // check if we are a member in this cluster + if let Some(id) = own_id { if id == member.operator_id { member_in_cluster = true; } } + // insert the cluster member and their share tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? .execute(params![*member.cluster_id, *member.operator_id])?; self.insert_share( @@ -53,38 +49,33 @@ impl NetworkDatabase { // If we are a member in this cluster, store relevant information if member_in_cluster { - let cluster_id = cluster.cluster_id; - // Store the cluster_id since we are a part of this cluster - self.state.clusters.insert(cluster_id); - cluster.cluster_members.iter().for_each(|member| { - // Store all of the operators that are a member of this cluster - self.state - .cluster_members - .entry(cluster_id) - .or_default() - .insert(member.operator_id); - // Store our share of the key - if member.operator_id == self.state.id.expect("Guaranteed to be populated") { - self.state.shares.insert(cluster_id, member.share.clone()); - } + self.modify_state(|state| { + let cluster_id = cluster.cluster_id; + // Store the cluster_id since we are a part of this cluster + state.clusters.insert(cluster_id); + cluster.cluster_members.iter().for_each(|member| { + // Store all of the operators that are a member of this cluster + state + .cluster_members + .entry(cluster_id) + .or_default() + .insert(member.operator_id); + // Store our share of the key + if member.operator_id == state.id.expect("Guaranteed to be populated") { + state.shares.insert(cluster_id, member.share.clone()); + } + }); + // Store the metadata of the validator for the cluster + state + .validator_metadata + .insert(cluster_id, cluster.validator_metadata); }); - // Store the metadata of the validator for the cluster - self.state - .validator_metadata - .insert(cluster_id, cluster.validator_metadata); } Ok(()) } /// Mark the cluster as liquidated or active - pub fn update_status(&mut self, id: ClusterId, status: bool) -> Result<(), DatabaseError> { - if !self.state.clusters.contains(&id) { - return Err(DatabaseError::NotFound(format!( - "Cluster with id {} not in database", - *id - ))); - } - + pub fn update_status(&self, id: ClusterId, status: bool) -> Result<(), DatabaseError> { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])? .execute(params![status, *id])?; @@ -94,26 +85,20 @@ impl NetworkDatabase { /// Delete a cluster from the database. 
This will cascade and delete all corresponding cluster
     /// members, shares, and validator metadata
     /// This corresponds to a validator being removed or exiting
-    pub fn delete_cluster(&mut self, id: ClusterId) -> Result<(), DatabaseError> {
-        // Make sure this cluster exists
-        if !self.state.clusters.contains(&id) {
-            return Err(DatabaseError::NotFound(format!(
-                "Cluster with id {} not in database",
-                *id
-            )));
-        }
-
+    pub fn delete_cluster(&self, id: ClusterId) -> Result<(), DatabaseError> {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::DeleteCluster])?
             .execute(params![*id])?;
 
         // If we are a member of this cluster, remove all relevant information
-        if self.state.clusters.contains(&id) {
-            self.state.clusters.remove(&id);
-            self.state.shares.remove(&id);
-            self.state.validator_metadata.remove(&id);
-            self.state.cluster_members.remove(&id);
-        }
+        self.modify_state(|state| {
+            if state.clusters.contains(&id) {
+                state.clusters.remove(&id);
+                state.shares.remove(&id);
+                state.validator_metadata.remove(&id);
+                state.cluster_members.remove(&id);
+            }
+        });
         Ok(())
     }
 }
diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index f68e9a92..f5169f60 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -1,5 +1,6 @@
 pub use crate::error::DatabaseError;
 use openssl::{pkey::Public, rsa::Rsa};
+use parking_lot::RwLock;
 use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::params;
 use ssv_types::{ClusterId, Operator, OperatorId, Share, ValidatorMetadata};
@@ -46,12 +47,12 @@ struct NetworkState {
 
 /// Top level NetworkDatabase that contains in memory storage for quick access
 /// to relevant information and a connection to the database
-#[derive(Debug, Clone)]
+#[derive(Debug)]
 pub struct NetworkDatabase {
     /// The public key of our operator
     pubkey: Rsa<Public>,
     /// Custom state stores for easy data access
-    state: NetworkState,
+    state: RwLock<NetworkState>,
     /// Connection to the database
     conn_pool: Pool,
 }
@@ -60,7 +61,7 @@ impl NetworkDatabase {
     /// Construct a new NetworkDatabase at the given path and the Public Key of our operator.
     pub fn new(path: &Path, pubkey: &Rsa<Public>) -> Result<Self, DatabaseError> {
         let conn_pool = Self::open_or_create(path)?;
-        let state = NetworkState::new_with_state(&conn_pool, pubkey)?;
+        let state = RwLock::new(NetworkState::new_with_state(&conn_pool, pubkey)?);
         Ok(Self {
             pubkey: pubkey.clone(),
             state,
@@ -68,12 +69,28 @@ impl NetworkDatabase {
         })
     }
 
+    pub(crate) fn read_state<F, R>(&self, f: F) -> R
+    where
+        F: FnOnce(&NetworkState) -> R,
+    {
+        let state = self.state.read();
+        f(&state)
+    }
+
+    fn modify_state<F, R>(&self, f: F) -> R
+    where
+        F: FnOnce(&mut NetworkState) -> R,
+    {
+        let mut state = self.state.write();
+        f(&mut state)
+    }
+
     /// Update the last processed block number in the database
-    pub fn processed_block(&mut self, block_number: u64) -> Result<(), DatabaseError> {
+    pub fn processed_block(&self, block_number: u64) -> Result<(), DatabaseError> {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::UpdateBlockNumber])?
             .execute(params![block_number])?;
-        self.state.last_processed_block = block_number;
+        self.modify_state(|state| state.last_processed_block = block_number);
         Ok(())
     }
 
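One detail worth spelling out before the operator path below: openssl's Rsa<Public> implements no equality, so insert_operator decides whether an inserted operator "is us" by comparing PEM encodings byte for byte. The check in isolation (a sketch; keys_match here is a hypothetical free-function spelling of the inline logic used in the patch):

    use openssl::{pkey::Public, rsa::Rsa};

    // Two public keys are treated as equal when their PEM encodings match
    // exactly; any encoding failure counts as a mismatch.
    fn keys_match(ours: &Rsa<Public>, theirs: &Rsa<Public>) -> bool {
        match (ours.public_key_to_pem(), theirs.public_key_to_pem()) {
            (Ok(a), Ok(b)) => a == b,
            _ => false,
        }
    }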
diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index a9d0019f..ebbaf054 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -7,28 +7,15 @@ use ssv_types::{Operator, OperatorId};
 /// Implements all operator related functionality on the database
 impl NetworkDatabase {
     /// Insert a new operator into the database
-    pub fn insert_operator(&mut self, operator: &Operator) -> Result<(), DatabaseError> {
+    pub fn insert_operator(&self, operator: &Operator) -> Result<(), DatabaseError> {
         // make sure that this operator does not already exist
-        if self.state.operators.contains_key(&operator.id) {
+        if self.operator_exists(&operator.id) {
             return Err(DatabaseError::NotFound(format!(
-                "Operator with id {} not in database",
+                "Operator with id {} already in database",
                 *operator.id
             )));
         }
 
-        // Check if this operator is us
-        if self.state.id.is_none() {
-            let keys_match = operator
-                .rsa_pubkey
-                .public_key_to_pem()
-                .and_then(|key1| self.pubkey.public_key_to_pem().map(|key2| key1 == key2))
-                .unwrap_or(false);
-            if keys_match {
-                self.state.id = Some(operator.id);
-            }
-        }
-
-        // encode the key
         let encoded = BASE64_STANDARD.encode(
             operator
                 .rsa_pubkey
@@ -40,16 +27,32 @@ impl NetworkDatabase {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::InsertOperator])?
             .execute(params![*operator.id, encoded, operator.owner.to_string()])?;
-        self.state.operators.insert(operator.id, operator.clone());
+
+        // Check to see if this operator is us, then record it in the state store
+        self.modify_state(|state| {
+            if state.id.is_none() {
+                let keys_match = operator
+                    .rsa_pubkey
+                    .public_key_to_pem()
+                    .and_then(|key1| self.pubkey.public_key_to_pem().map(|key2| key1 == key2))
+                    .unwrap_or(false);
+                if keys_match {
+                    state.id = Some(operator.id);
+                }
+            }
+
+            state.operators.insert(operator.id, operator.clone());
+        });
         Ok(())
     }
 
     /// Delete an operator
-    pub fn delete_operator(&mut self, id: OperatorId) -> Result<(), DatabaseError> {
-        // make sure that it exists
-        if !self.state.operators.contains_key(&id) {
+    pub fn delete_operator(&self, id: OperatorId) -> Result<(), DatabaseError> {
+        // make sure that this operator exists
+        if !self.operator_exists(&id) {
             return Err(DatabaseError::NotFound(format!(
                 "Operator with id {} not in database",
                 *id
             )));
         }
@@ -61,7 +64,7 @@ impl NetworkDatabase {
             .execute(params![*id])?;
 
         // Remove the operator
-        self.state.operators.remove(&id);
+        self.modify_state(|state| state.operators.remove(&id));
         Ok(())
     }
 }
diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs
index 60a7346a..a1125d18 100644
--- a/anchor/database/src/share_operations.rs
+++ b/anchor/database/src/share_operations.rs
@@ -6,7 +6,7 @@ use types::PublicKey;
 /// Implements all Share related functionality on the database
 impl NetworkDatabase {
     pub(crate) fn insert_share(
-        &mut self,
+        &self,
         tx: &Transaction<'_>,
         share: &Share,
         cluster_id: ClusterId,
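The accessors in state.rs below all funnel through read_state and modify_state, so a lock guard never outlives its closure. A crate-internal usage sketch (cluster_id is a placeholder):

    // Reads hold the lock only for the closure body and return owned data.
    let block = db.get_last_processed_block();
    db.processed_block(block + 1).expect("Failed to persist block");

    // Gather everything needed in a single closure rather than nesting
    // lock acquisitions; parking_lot's RwLock is not reentrant, so calling
    // read_state from inside modify_state would deadlock.
    let share = db.read_state(|state| {
        state
            .shares
            .get(&cluster_id)
            .filter(|_| state.clusters.contains(&cluster_id))
            .cloned()
    });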
diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
index f69749ed..6c1bc84b 100644
--- a/anchor/database/src/state.rs
+++ b/anchor/database/src/state.rs
@@ -157,48 +157,62 @@ impl NetworkState {
 
 // Clean interface for accessing network state
 impl NetworkDatabase {
     /// Get operator data from in-memory store
-    pub fn get_operator(&self, id: &OperatorId) -> Option<&Operator> {
-        self.state.operators.get(id)
+    pub fn get_operator(&self, id: &OperatorId) -> Option<Operator> {
+        self.read_state(|state| state.operators.get(id).cloned())
     }
 
     /// Check if an operator exists
     pub fn operator_exists(&self, id: &OperatorId) -> bool {
-        self.state.operators.contains_key(id)
+        self.read_state(|state| state.operators.contains_key(id))
+    }
+
+    /// Check if a cluster exists
+    pub fn cluster_exists(&self, id: &ClusterId) -> bool {
+        self.read_state(|state| state.clusters.contains(id))
     }
 
     /// Check if we are a member of a specific cluster
     pub fn member_of_cluster(&self, id: &ClusterId) -> bool {
-        self.state.clusters.contains(id)
+        self.read_state(|state| state.clusters.contains(id))
     }
 
     /// Get own share of key for a Cluster we are a member in
-    pub fn get_share(&self, id: &ClusterId) -> Option<&Share> {
-        self.state.shares.get(id)
+    pub fn get_share(&self, id: &ClusterId) -> Option<Share> {
+        self.read_state(|state| state.shares.get(id).cloned())
     }
 
     /// Set the id of our own operator
-    pub fn set_own_id(&mut self, id: OperatorId) {
-        self.state.id = Some(id);
+    pub fn set_own_id(&self, id: OperatorId) {
+        self.modify_state(|state| state.id = Some(id))
     }
 
     /// Get the metadata for the cluster
-    pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<&ValidatorMetadata> {
-        self.state.validator_metadata.get(id)
+    pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<ValidatorMetadata> {
+        self.read_state(|state| state.validator_metadata.get(id).cloned())
+    }
+
+    /// Get the last block that has been fully processed by the database
+    pub fn get_last_processed_block(&self) -> u64 {
+        self.read_state(|state| state.last_processed_block)
     }
 
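Note that these getters also change their return types from borrowed to owned values. That is forced by the locking model rather than style: a reference into NetworkState would have to outlive the guard that read_state drops when the closure returns. Sketch of the rejected shape for contrast (all commented, since it cannot compile):

    // Does not compile under the RwLock design: `state` only lives for the
    // closure, so a borrow of its contents cannot escape.
    // pub fn get_operator(&self, id: &OperatorId) -> Option<&Operator> {
    //     self.read_state(|state| state.operators.get(id))
    // }
    //
    // Cloning decouples the result from the guard's lifetime:
    // pub fn get_operator(&self, id: &OperatorId) -> Option<Operator> {
    //     self.read_state(|state| state.operators.get(id).cloned())
    // }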
     /// Get the Fee Recipient address
     pub fn get_fee_recipient(&self, id: &ClusterId) -> Option<Address> {
-        self.state
-            .validator_metadata
-            .get(id)
-            .map(|metadata| metadata.fee_recipient)
+        self.read_state(|state| {
+            state
+                .validator_metadata
+                .get(id)
+                .map(|metadata| metadata.fee_recipient)
+        })
     }
 
     /// Get the Validator Index
     pub fn get_validator_index(&self, id: &ClusterId) -> Option<ValidatorIndex> {
-        self.state
-            .validator_metadata
-            .get(id)
-            .map(|metadata| metadata.validator_index)
+        self.read_state(|state| {
+            state
+                .validator_metadata
+                .get(id)
+                .map(|metadata| metadata.validator_index)
+        })
     }
 }
diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs
index e4267f21..1e7adf49 100644
--- a/anchor/database/src/tests/cluster_tests.rs
+++ b/anchor/database/src/tests/cluster_tests.rs
@@ -15,7 +15,7 @@ mod cluster_database_tests {
     #[test]
     // Try inserting a cluster that does not already have registered operators in the database
     fn test_insert_cluster_without_operators() {
-        let mut fixture = TestFixture::new_empty();
+        let fixture = TestFixture::new_empty();
         let cluster = generators::cluster::random(3);
         fixture
             .db
@@ -56,7 +56,7 @@ mod cluster_database_tests {
     #[test]
     // Test inserting two clusters that an operator is a member of
     fn test_insert_two_clusters() {
-        let mut fixture = TestFixture::new_empty();
+        let fixture = TestFixture::new_empty();
         let us_pubkey = fixture.pubkey;
         let us_operator = generators::operator::with_pubkey(us_pubkey);
 
@@ -89,22 +89,10 @@ mod cluster_database_tests {
         assertions::assert_cluster_exists_in_store(&fixture.db, &cluster2);
     }
 
-    #[test]
-    // Test deleting a cluster that does not exist
-    fn test_delete_dne_cluster() {
-        let mut fixture = TestFixture::new();
-        let dne_id = ClusterId(*fixture.cluster.cluster_id - 1);
-
-        fixture
-            .db
-            .delete_cluster(dne_id)
-            .expect_err("Expected failure when deleting cluster that does not exist");
-    }
-
     #[test]
     // Test inserting a cluster that already exists
     fn test_duplicate_cluster_insert() {
-        let mut fixture = TestFixture::new();
+        let fixture = TestFixture::new();
         fixture
             .db
             .insert_cluster(fixture.cluster)
diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs
index 5b27659d..c0f759a1 100644
--- a/anchor/database/src/tests/operator_tests.rs
+++ b/anchor/database/src/tests/operator_tests.rs
@@ -9,7 +9,7 @@ mod operator_database_tests {
     // state stores
     fn test_insert_retrieve_operator() {
         // Create a new test fixture with empty db
-        let mut fixture = TestFixture::new_empty();
+        let fixture = TestFixture::new_empty();
 
         // Generate a new operator and insert it
         let operator = generators::operator::with_id(1);
@@ -27,7 +27,7 @@ mod operator_database_tests {
     // Ensure that we cannot insert a duplicate operator into the database
     fn test_duplicate_insert() {
         // Create a new test fixture with empty db
-        let mut fixture = TestFixture::new_empty();
+        let fixture = TestFixture::new_empty();
 
         // Generate a new operator and insert it
         let operator = generators::operator::with_id(1);
@@ -47,7 +47,7 @@ mod operator_database_tests {
     // Test deleting an operator and confirming it is gone from the db and in memory
     fn test_insert_delete_operator() {
         // Create new test fixture with empty db
-        let mut fixture = 
TestFixture::new_empty(); + let fixture = TestFixture::new_empty(); // Generate and insert operators let operators: Vec = (0..4).map(generators::operator::with_id).collect(); @@ -96,7 +96,7 @@ mod operator_database_tests { #[test] /// Try to delete an operator that does not exist fn test_delete_dne_operator() { - let mut fixture = TestFixture::new_empty(); + let fixture = TestFixture::new_empty(); fixture .db .delete_operator(OperatorId(1)) diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index eef1c48f..faa0f9ba 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -42,13 +42,13 @@ mod state_database_tests { #[test] // Test that you can update and retrieve a block number fn test_block_number() { - let mut fixture = TestFixture::new(); - assert_eq!(fixture.db.state.last_processed_block, 0); + let fixture = TestFixture::new(); + assert_eq!(fixture.db.read_state(|state| state.last_processed_block), 0); fixture .db .processed_block(10) .expect("Failed to update the block number"); - assert_eq!(fixture.db.state.last_processed_block, 10); + assert_eq!(fixture.db.read_state(|state| state.last_processed_block), 10); } #[test] @@ -63,6 +63,6 @@ mod state_database_tests { fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) .expect("Failed to create database"); - assert_eq!(fixture.db.state.last_processed_block, 10); + assert_eq!(fixture.db.get_last_processed_block(), 10); } } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index b890cb10..d17ccc5d 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -38,7 +38,7 @@ impl TestFixture { let temp_dir = TempDir::new().expect("Failed to create temporary directory"); let db_path = temp_dir.path().join("test.db"); - let mut db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB"); + let db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB"); operators.iter().for_each(|op| { db.insert_operator(op).expect("Failed to insert operator"); }); @@ -289,11 +289,13 @@ pub mod assertions { // Verifies that the operator is in the state store pub fn assert_operator_exists_in_store(db: &NetworkDatabase, operator: &Operator) { // Check operator exists in memory state - let stored_operator = db - .state - .operators - .get(&operator.id) - .expect("Operator should exist in memory state"); + let stored_operator = db.read_state(|state| { + state + .operators + .get(&operator.id) + .expect("Operator should exist in memory state") + .clone() + }); // Verify all fields match assert_eq!(stored_operator.id, operator.id, "Operator ID mismatch"); @@ -310,97 +312,104 @@ pub mod assertions { // Verifies that the operator is not in the state store pub fn assert_operator_not_exists_in_store(db: &NetworkDatabase, operator: OperatorId) { - assert!(!db.state.operators.contains_key(&operator)); + assert!(!db.operator_exists(&operator)); } // Verifies that the cluster does not exist in the state store pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) { // Just make sure we have 0 references to the cluster_id - let cluster_id = cluster.cluster_id; - assert!(!db.state.clusters.contains(&cluster_id)); - assert!(!db.state.shares.contains_key(&cluster_id)); - assert!(!db.state.validator_metadata.contains_key(&cluster_id)); - assert!(!db.state.cluster_members.contains_key(&cluster_id)); - assert!(!db.state.cluster_members.contains_key(&cluster_id)); + 
db.read_state(|state| {
+            let cluster_id = cluster.cluster_id;
+            assert!(!state.clusters.contains(&cluster_id));
+            assert!(!state.shares.contains_key(&cluster_id));
+            assert!(!state.validator_metadata.contains_key(&cluster_id));
+            assert!(!state.cluster_members.contains_key(&cluster_id));
+            assert!(!state.cluster_members.contains_key(&cluster_id));
+        });
     }
 
     // Verifies that the cluster exists correctly in the state store
     pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) {
         // - operators: HashMap<OperatorId, Operator>,
         // Verify all operators exist and are cluster members
-        let operator_ids: Vec<OperatorId> = cluster
-            .cluster_members
-            .iter()
-            .map(|c| c.operator_id)
-            .collect();
+        db.read_state(|state| {
+            let operator_ids: Vec<OperatorId> = cluster
+                .cluster_members
+                .iter()
+                .map(|c| c.operator_id)
+                .collect();
 
-        for id in operator_ids {
-            // Check operator exists
-            assert!(
-                db.operator_exists(&id),
-                "Operator {} not found in database",
-                *id
-            );
+            for id in operator_ids {
+                // Check operator exists
+                assert!(
+                    db.operator_exists(&id),
+                    "Operator {} not found in database",
+                    *id
+                );
+
+                // - cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
+                // Check operator is recorded as cluster member
+                assert!(
+                    state.cluster_members[&cluster.cluster_id].contains(&id),
+                    "Operator {} not recorded as cluster member in memory state",
+                    *id
+                );
+            }
 
-            // - cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
-            // Check operator is recorded as cluster member
+            // - clusters: HashSet<ClusterId>,
+            // Verify cluster is recorded in memory state
             assert!(
-                db.state.cluster_members[&cluster.cluster_id].contains(&id),
-                "Operator {} not recorded as cluster member in memory state",
-                *id
+                state.clusters.contains(&cluster.cluster_id),
+                "Cluster ID not found in memory state"
             );
-        }
-
-        // - clusters: HashSet<ClusterId>,
-        // Verify cluster is recorded in memory state
-        assert!(
-            db.state.clusters.contains(&cluster.cluster_id),
-            "Cluster ID not found in memory state"
-        );
 
-        // - shares: HashMap<ClusterId, Share>,
-        // Verify share exists and share data matches if we're a member
-        if let Some(our_id) = db.state.id {
-            if let Some(our_member) = cluster
-                .cluster_members
-                .iter()
-                .find(|m| m.operator_id == our_id)
-            {
-                let stored_share = db.state.shares[&cluster.cluster_id].clone();
-                assert_eq!(
-                    stored_share.share_pubkey, our_member.share.share_pubkey,
-                    "Share public key mismatch"
-                );
-                assert_eq!(
-                    stored_share.encrypted_private_key, our_member.share.encrypted_private_key,
-                    "Encrypted private key mismatch"
-                );
+            // - shares: HashMap<ClusterId, Share>,
+            // Verify share exists and share data matches if we're a member
+            if let Some(our_id) = state.id {
+                if let Some(our_member) = cluster
+                    .cluster_members
+                    .iter()
+                    .find(|m| m.operator_id == our_id)
+                {
+                    let stored_share = state.shares[&cluster.cluster_id].clone();
+                    assert_eq!(
+                        stored_share.share_pubkey, our_member.share.share_pubkey,
+                        "Share public key mismatch"
+                    );
+                    assert_eq!(
+                        stored_share.encrypted_private_key, our_member.share.encrypted_private_key,
+                        "Encrypted private key mismatch"
+                    );
+                }
             }
-        }
-        assert!(
-            db.state.shares.contains_key(&cluster.cluster_id),
-            "No share found for cluster"
-        );
+            assert!(
+                state.shares.contains_key(&cluster.cluster_id),
+                "No share found for cluster"
+            );
 
-        // - validator_metadata: HashMap<ClusterId, ValidatorMetadata>,
-        // Verify validator metadata matches
-        let validator_metadata = db.state.validator_metadata[&cluster.cluster_id].clone();
-        assert_eq!(
-            validator_metadata.owner, cluster.validator_metadata.owner,
-            "Validator owner mismatch"
-        );
-        assert_eq!(
-            validator_metadata.validator_index, 
cluster.validator_metadata.validator_index, - "Validator index mismatch" - ); - assert_eq!( - validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, - "Fee recipient mismatch" - ); - assert_eq!( - validator_metadata.graffiti, cluster.validator_metadata.graffiti, - "Graffiti mismatch" - ); + // - validator_metadata: HashMap, + // Verify validator metadata matches + let validator_metadata = db + .get_validator_metadata(&cluster.cluster_id) + .expect("Failed to get metadata") + .clone(); + assert_eq!( + validator_metadata.owner, cluster.validator_metadata.owner, + "Validator owner mismatch" + ); + assert_eq!( + validator_metadata.validator_index, cluster.validator_metadata.validator_index, + "Validator index mismatch" + ); + assert_eq!( + validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, + "Fee recipient mismatch" + ); + assert_eq!( + validator_metadata.graffiti, cluster.validator_metadata.graffiti, + "Graffiti mismatch" + ); + }); } // Database (Persistent Storage) Assertions diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index 6a1df146..9e9ea38d 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -7,7 +7,7 @@ mod validator_database_tests { #[test] // Test updating the fee recipient fn test_update_fee_recipient() { - let mut fixture = TestFixture::new(); + let fixture = TestFixture::new(); let cluster = &fixture.cluster; let new_address = Address::random(); @@ -22,7 +22,7 @@ mod validator_database_tests { .expect("Failed to update fee recipient"); // Verify update in memory state - let metadata = &fixture.db.state.validator_metadata[&cluster.cluster_id]; + let metadata = &fixture.db.get_validator_metadata(&cluster.cluster_id).expect("Failed to get cluster metadata"); assert_eq!( metadata.fee_recipient, new_address, "Fee recipient not updated in memory" @@ -44,7 +44,7 @@ mod validator_database_tests { #[test] /// Test updating the graffiti of a validator fn test_update_graffiti() { - let mut fixture = TestFixture::new(); + let fixture = TestFixture::new(); let cluster = &fixture.cluster; let new_graffiti = Graffiti::default(); // Or create a specific test graffiti @@ -59,7 +59,7 @@ mod validator_database_tests { .expect("Failed to update graffiti"); // Verify update in memory state - let metadata = &fixture.db.state.validator_metadata[&cluster.cluster_id]; + let metadata = &fixture.db.get_validator_metadata(&cluster.cluster_id).expect("Failed to get cluster metadata"); assert_eq!( metadata.graffiti, new_graffiti, "Graffiti not updated in memory" @@ -69,7 +69,7 @@ mod validator_database_tests { #[test] /// Test updating the fee recipient of a validator that does not exist fn test_update_validator_nonexistent_cluster() { - let mut fixture = TestFixture::new(); + let fixture = TestFixture::new(); let nonexistent_cluster_id = ClusterId(*fixture.cluster.cluster_id + 1); let result = fixture.db.update_fee_recipient( diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 2c905d53..5751c80a 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -7,13 +7,14 @@ use types::{Address, Graffiti, PublicKey}; impl NetworkDatabase { /// Update the fee recipient address for a validator pub fn update_fee_recipient( - &mut self, + &self, cluster_id: ClusterId, validator_pubkey: PublicKey, fee_recipient: Address, ) -> Result<(), DatabaseError> 
{ // Make sure we are part of the cluster for this Validator - if !self.state.clusters.contains(&cluster_id) { + let is_member = self.read_state(|state| state.clusters.contains(&cluster_id)); + if !is_member { return Err(DatabaseError::NotFound(format!( "Validator for Cluster {} not in database", *cluster_id @@ -26,23 +27,27 @@ impl NetworkDatabase { fee_recipient.to_string(), validator_pubkey.to_string() ])?; - let metadata = self - .state - .validator_metadata - .get_mut(&cluster_id) - .expect("Cluster should exist"); - metadata.fee_recipient = fee_recipient; + + self.modify_state(|state| { + let metadata = state + .validator_metadata + .get_mut(&cluster_id) + .expect("Cluster should exist"); + metadata.fee_recipient = fee_recipient; + }); + Ok(()) } /// Update the graffiti for a validator pub fn update_graffiti( - &mut self, + &self, cluster_id: ClusterId, validator_pubkey: PublicKey, graffiti: Graffiti, ) -> Result<(), DatabaseError> { - if !self.state.clusters.contains(&cluster_id) { + let is_member = self.read_state(|state| state.clusters.contains(&cluster_id)); + if !is_member { return Err(DatabaseError::NotFound(format!( "Validator for Cluster {} not in database", *cluster_id @@ -58,12 +63,13 @@ impl NetworkDatabase { ])?; // Update the in-memory state - let metadata = self - .state - .validator_metadata - .get_mut(&cluster_id) - .expect("Cluster should exist since we checked above"); - metadata.graffiti = graffiti; + self.modify_state(|state| { + let metadata = state + .validator_metadata + .get_mut(&cluster_id) + .expect("Cluster should exist"); + metadata.graffiti = graffiti; + }); Ok(()) } From f358ce52b0ff8d11bb32f4c5e2372f90f386b3bf Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 13 Dec 2024 20:49:25 +0000 Subject: [PATCH 26/50] fmt and clippy --- anchor/database/src/tests/cluster_tests.rs | 4 ++-- anchor/database/src/tests/state_tests.rs | 5 ++++- anchor/database/src/tests/validator_tests.rs | 10 ++++++++-- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 1e7adf49..a78fa192 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -26,7 +26,7 @@ mod cluster_database_tests { #[test] // Test deleting a cluster and make sure that it is properly cleaned up fn test_delete_cluster() { - let mut fixture = TestFixture::new(); + let fixture = TestFixture::new(); fixture .db .delete_cluster(fixture.cluster.cluster_id) @@ -38,7 +38,7 @@ mod cluster_database_tests { #[test] // Test updating the operational status of the cluster fn test_update_cluster_status() { - let mut fixture = TestFixture::new(); + let fixture = TestFixture::new(); let cluster_id = fixture.cluster.cluster_id; // Test updating to liquidated diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index faa0f9ba..42495b6c 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -48,7 +48,10 @@ mod state_database_tests { .db .processed_block(10) .expect("Failed to update the block number"); - assert_eq!(fixture.db.read_state(|state| state.last_processed_block), 10); + assert_eq!( + fixture.db.read_state(|state| state.last_processed_block), + 10 + ); } #[test] diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index 9e9ea38d..fd55ec46 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ 
b/anchor/database/src/tests/validator_tests.rs @@ -22,7 +22,10 @@ mod validator_database_tests { .expect("Failed to update fee recipient"); // Verify update in memory state - let metadata = &fixture.db.get_validator_metadata(&cluster.cluster_id).expect("Failed to get cluster metadata"); + let metadata = &fixture + .db + .get_validator_metadata(&cluster.cluster_id) + .expect("Failed to get cluster metadata"); assert_eq!( metadata.fee_recipient, new_address, "Fee recipient not updated in memory" @@ -59,7 +62,10 @@ mod validator_database_tests { .expect("Failed to update graffiti"); // Verify update in memory state - let metadata = &fixture.db.get_validator_metadata(&cluster.cluster_id).expect("Failed to get cluster metadata"); + let metadata = &fixture + .db + .get_validator_metadata(&cluster.cluster_id) + .expect("Failed to get cluster metadata"); assert_eq!( metadata.graffiti, new_graffiti, "Graffiti not updated in memory" From 7a855a051835b1a797ee5439793240b71601b461 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 16 Dec 2024 17:16:23 +0000 Subject: [PATCH 27/50] load in block number even if we have not found id --- Cargo.lock | 121 +++++++++++++++++------------------ anchor/database/src/state.rs | 11 ++-- 2 files changed, 67 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ff2d50c..c0a30405 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -620,7 +620,7 @@ dependencies = [ "http 1.2.0", "http-body 1.0.1", "http-body-util", - "hyper 1.5.1", + "hyper 1.5.2", "hyper-util", "itoa", "matchit", @@ -790,18 +790,18 @@ dependencies = [ [[package]] name = "bit-set" -version = "0.5.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3" dependencies = [ "bit-vec", ] [[package]] name = "bit-vec" -version = "0.6.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" +checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] name = "bitflags" @@ -896,7 +896,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "arbitrary", @@ -1165,7 +1165,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "clap", @@ -1189,7 +1189,7 @@ dependencies = [ "fdlimit", "http_api", "http_metrics", - "hyper 1.5.1", + "hyper 1.5.2", "network", "parking_lot 0.12.3", "processor", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1246,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "quote", "syn 1.0.109", @@ -1392,18 +1392,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1420,9 +1420,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -1853,7 +1853,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2317,7 +2317,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2339,7 +2339,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2407,7 +2407,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "bytes", "discv5 0.9.0", @@ -2811,7 +2811,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -3157,7 +3157,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" 
+source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "async-channel", "asynchronous-codec", @@ -3553,9 +3553,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.31" +version = "0.14.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85" +checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" dependencies = [ "bytes", "futures-channel", @@ -3577,9 +3577,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -3602,7 +3602,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -3615,7 +3615,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.31", + "hyper 0.14.32", "native-tls", "tokio", "tokio-native-tls", @@ -3631,7 +3631,7 @@ dependencies = [ "futures-util", "http 1.2.0", "http-body 1.0.1", - "hyper 1.5.1", + "hyper 1.5.2", "pin-project-lite", "tokio", "tower-service", @@ -3849,7 +3849,7 @@ dependencies = [ "bytes", "futures", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "rand", "tokio", @@ -3954,7 +3954,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "bytes", ] @@ -4150,7 +4150,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "arbitrary", "c-kzg", @@ -4790,7 +4790,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4848,7 +4848,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "git-version", "target_info", @@ -4927,7 +4927,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "chrono", "metrics 0.2.0 
(git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4975,7 +4975,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "fnv", ] @@ -5051,7 +5051,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5093,7 +5093,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "prometheus", ] @@ -5443,7 +5443,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", - "libm", ] [[package]] @@ -5802,7 +5801,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.6", + "thiserror 2.0.7", "ucd-trie", ] @@ -5935,7 +5934,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6075,9 +6074,9 @@ dependencies = [ [[package]] name = "proptest" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" dependencies = [ "bit-set", "bit-vec", @@ -6095,9 +6094,9 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff7ff745a347b87471d859a377a9a404361e7efc2a971d73424a6d183c0fc77" +checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", @@ -6196,7 +6195,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.6", + "thiserror 2.0.7", "tokio", "tracing", ] @@ -6215,7 +6214,7 @@ dependencies = [ "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.6", + "thiserror 2.0.7", "tinyvec", "tracing", "web-time", @@ -6436,7 +6435,7 @@ dependencies = [ "h2", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.31", + "hyper 0.14.32", "hyper-rustls", "hyper-tls", "ipnet", @@ -6861,7 +6860,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" [[package]] name = "salsa20" @@ -7035,7 +7034,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "serde", "url", @@ -7657,7 +7656,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7786,7 +7785,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "async-channel", "futures", @@ -7844,7 +7843,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "quote", "syn 1.0.109", @@ -7861,11 +7860,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.7", ] [[package]] @@ -7881,9 +7880,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" dependencies = [ "proc-macro2", "quote", @@ -8359,7 +8358,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8537,7 +8536,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#d49e1be35d3776bb6ce074d9446b6ff3663bf7fe" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#847c8019c7867e3eaf65168e5259ea33e7e0eb5a" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -8676,7 +8675,7 @@ dependencies = [ "futures-util", "headers", "http 0.2.12", - "hyper 0.14.31", + "hyper 0.14.32", "log", "mime", "mime_guess", diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 6c1bc84b..b8c847fd 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -19,6 +19,9 @@ impl NetworkState { // Get database connection from the pool let conn = conn_pool.get()?; + // Get the 
last processed block from the database
+        let last_processed_block = Self::get_last_processed_block(&conn)?;
+
         // Without an Id, we have no idea who we are. Check to see if an operator with our PublicKey
         // is stored in the database, else we have to wait for it to be processed by the execution
         // layer
@@ -26,7 +29,10 @@ impl NetworkState {
             operator_id
         } else {
             // If it does not exist, just default the state
-            return Ok(Self::default());
+            return Ok(Self {
+                last_processed_block,
+                ..Default::default()
+            });
         };
 
         // First Phase: Fetch data from the database
@@ -66,9 +72,6 @@ impl NetworkState {
             }
         });
 
-        // Finally, get the last processed block from the database
-        let last_processed_block = Self::get_last_processed_block(&conn)?;
-
         // Return fully constructed state
         Ok(Self {
             id: Some(id),

From 95731fa1fa0644b39695fb7d6bc2065c61f927cf Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Tue, 17 Dec 2024 12:38:46 +0000
Subject: [PATCH 28/50] cargo sort

---
 Cargo.toml                         | 5 ++---
 anchor/common/ssv_types/Cargo.toml | 2 +-
 anchor/database/Cargo.toml         | 8 ++++----
 3 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index c1a30d95..d8c74799 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,11 +5,11 @@ members = [
     "anchor/client",
     "anchor/common/ssv_types",
     "anchor/common/version",
+    "anchor/common/version",
+    "anchor/database",
     "anchor/http_api",
     "anchor/http_metrics",
     "anchor/network",
-    "anchor/database",
-    "anchor/common/version",
     "anchor/processor",
     "anchor/qbft",
 ]
@@ -63,7 +63,6 @@ base64 = "0.22.1"
 rusqlite = "0.28.0"
 openssl = "0.10.68"
 
-
 [profile.maxperf]
 inherits = "release"
 lto = "fat"
diff --git a/anchor/common/ssv_types/Cargo.toml b/anchor/common/ssv_types/Cargo.toml
index ed093c6a..14be4cbe 100644
--- a/anchor/common/ssv_types/Cargo.toml
+++ b/anchor/common/ssv_types/Cargo.toml
@@ -6,7 +6,7 @@ authors = ["Sigma Prime <info@sigmaprime.io>"]
 
 [dependencies]
 base64 = { workspace = true }
-rusqlite = { workspace = true }
 derive_more = { workspace = true }
 openssl = { workspace = true }
+rusqlite = { workspace = true }
 types = { workspace = true }
diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml
index b73391de..a166d24c 100644
--- a/anchor/database/Cargo.toml
+++ b/anchor/database/Cargo.toml
@@ -5,14 +5,14 @@ edition = { workspace = true }
 authors = ["Sigma Prime <info@sigmaprime.io>"]
 
 [dependencies]
+base64 = { workspace = true }
+openssl = { workspace = true }
+parking_lot = { workspace = true }
 r2d2 = "0.8.10"
 r2d2_sqlite = "0.21.0"
-rusqlite = { workspace = true}
+rusqlite = { workspace = true }
 ssv_types = { workspace = true }
 types = { workspace = true }
-base64 = { workspace = true }
-openssl = { workspace = true }
-parking_lot = { workspace = true }
 
 [dev-dependencies]
 rand = "0.8.5"

From de5be8df5b3c366c20a5fa3a77d5f5f372fd2f44 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Thu, 19 Dec 2024 00:38:06 +0000
Subject: [PATCH 29/50] mvp multi index map

---
 anchor/database/src/multi_index.rs | 308 +++++++++++++++++++++++++++++
 1 file changed, 308 insertions(+)
 create mode 100644 anchor/database/src/multi_index.rs

diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs
new file mode 100644
index 00000000..731d2933
--- /dev/null
+++ b/anchor/database/src/multi_index.rs
@@ -0,0 +1,308 @@
+use dashmap::DashMap;
+use std::{hash::Hash, marker::PhantomData};
+
+/// Marker trait for uniquely identifying indices
+pub trait Unique {}
+
+/// Marker trait for non-uniquely identifying indices
+pub trait NotUnique {}
+
+/// Index type markers
+pub enum Primary {}
+pub enum Secondary {}
+pub enum Tertiary {}
+
+// Type tag markers
+#[derive(Debug)]
+pub enum UniqueTag {}
+impl Unique for UniqueTag {}
+
+#[derive(Debug)]
+pub enum NonUniqueTag {}
+impl NotUnique for NonUniqueTag {}
+
+/// Trait for accessing values through a unique index
+pub trait UniqueIndex<K, V> {
+    fn get_by(&self, key: &K) -> Option<V>;
+}
+
+/// Trait for accessing values through a non-unique index
+pub trait NonUniqueIndex<K, V> {
+    fn get_all_by(&self, key: &K) -> Option<Vec<V>>;
+}
+
+#[derive(Debug, Default)]
+struct InnerMaps<K1, K2, K3, V>
+where
+    K1: Eq + Hash,
+    K2: Eq + Hash,
+    K3: Eq + Hash,
+{
+    primary: DashMap<K1, V>,
+    secondary_unique: DashMap<K2, K1>,
+    secondary_multi: DashMap<K2, Vec<K1>>,
+    tertiary_unique: DashMap<K3, K1>,
+    tertiary_multi: DashMap<K3, Vec<K1>>,
+}
+
+/// A concurrent multi-index map that supports up to three different access patterns.
+/// The core differentiates between unique identification and non-unique identification. The primary
+/// index is forced to always uniquely identify the value. The secondary and tertiary indices have
+/// more flexibility. The key may non-uniquely identify many different values, or uniquely identify
+/// a single value
+///
+/// Example: A share is uniquely identified by the Validator's public key that it belongs to. A
+/// ClusterId does not uniquely identify a share as a cluster contains multiple shares
+///
+/// - K1: Primary key type (always unique)
+/// - K2: Secondary key type
+/// - K3: Tertiary key type
+/// - V: Value type
+/// - U1: Secondary index uniqueness (Unique or NotUnique)
+/// - U2: Tertiary index uniqueness (Unique or NotUnique)
+#[derive(Debug, Default)]
+pub struct MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash,
+    K2: Eq + Hash,
+    K3: Eq + Hash,
+{
+    maps: InnerMaps<K1, K2, K3, V>,
+    _marker: PhantomData<(U1, U2)>,
+}
+
+impl<K1, K2, K3, V, U1, U2> MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+    U1: 'static,
+    U2: 'static,
+{
+    /// Creates a new empty MultiIndexMap
+    pub fn new() -> Self {
+        Self {
+            maps: InnerMaps {
+                primary: DashMap::new(),
+                secondary_unique: DashMap::new(),
+                secondary_multi: DashMap::new(),
+                tertiary_unique: DashMap::new(),
+                tertiary_multi: DashMap::new(),
+            },
+            _marker: PhantomData,
+        }
+    }
+
+    /// Insert a new value and associated keys into the map
+    pub fn insert(&self, k1: &K1, k2: &K2, k3: &K3, v: V) {
+        // Insert into primary map first
+        self.maps.primary.insert(k1.clone(), v);
+
+        // Handle secondary index based on uniqueness
+        if std::any::TypeId::of::<U1>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.secondary_unique.insert(k2.clone(), k1.clone());
+        } else {
+            self.maps
+                .secondary_multi
+                .entry(k2.clone())
+                .and_modify(|v| v.push(k1.clone()))
+                .or_insert_with(|| vec![k1.clone()]);
+        }
+
+        // Handle tertiary index based on uniqueness
+        if std::any::TypeId::of::<U2>() == std::any::TypeId::of::<UniqueTag>() {
+            self.maps.tertiary_unique.insert(k3.clone(), k1.clone());
+        } else {
+            self.maps
+                .tertiary_multi
+                .entry(k3.clone())
+                .and_modify(|v| v.push(k1.clone()))
+                .or_insert_with(|| vec![k1.clone()]);
+        }
+    }
+}
+
+// Implement unique access for primary key
+impl<K1, K2, K3, V, U1, U2> UniqueIndex<K1, V> for MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+{
+    fn get_by(&self, key: &K1) -> Option<V> {
+        self.maps.primary.get(key).map(|v| v.value().clone())
+    }
+}
+
+// Implement unique access for secondary key
+impl<K1, K2, K3, V, U1, U2> UniqueIndex<K2, V> for MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+    U1: Unique,
+{
+    fn get_by(&self, key: &K2) -> Option<V> {
+        let primary_key = self.maps.secondary_unique.get(key)?;
+        self.maps
+            .primary
+            .get(primary_key.value())
+            .map(|v| v.value().clone())
+    }
+}
+
+// Implement non-unique access for secondary key
+impl<K1, K2, K3, V, U1, U2> NonUniqueIndex<K2, V>
+    for MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+    U1: NotUnique,
+{
+    fn get_all_by(&self, key: &K2) -> Option<Vec<V>> {
+        self.maps.secondary_multi.get(key).map(|keys| {
+            keys.value()
+                .iter()
+                .filter_map(|k1| self.maps.primary.get(k1).map(|v| v.value().clone()))
+                .collect()
+        })
+    }
+}
+
+// Implement unique access for tertiary key
+impl<K1, K2, K3, V, U1, U2> UniqueIndex<K3, V> for MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+    U2: Unique,
+{
+    fn get_by(&self, key: &K3) -> Option<V> {
+        let primary_key = self.maps.tertiary_unique.get(key)?;
+        self.maps
+            .primary
+            .get(primary_key.value())
+            .map(|v| v.value().clone())
+    }
+}
+
+// Implement non-unique access for tertiary key
+impl<K1, K2, K3, V, U1, U2> NonUniqueIndex<K3, V> for MultiIndexMap<K1, K2, K3, V, U1, U2>
+where
+    K1: Eq + Hash + Clone,
+    K2: Eq + Hash + Clone,
+    K3: Eq + Hash + Clone,
+    V: Clone,
+    U2: NotUnique,
+{
+    fn get_all_by(&self, key: &K3) -> Option<Vec<V>> {
+        self.maps.tertiary_multi.get(key).map(|keys| {
+            keys.value()
+                .iter()
+                .filter_map(|k1| self.maps.primary.get(k1).map(|v| v.value().clone()))
+                .collect()
+        })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::tests::test_prelude::generators;
+    use ssv_types::{Cluster, ClusterId, OperatorId, Share};
+    use types::{Address, PublicKey};
+
+    #[test]
+    fn test_nonunique() {
+        let cluster_id = ClusterId(10);
+        let operator_id = OperatorId(10);
+        let owner = Address::random();
+
+        // Shares with different public keys, but same cluster id and owner
+        let share_1 = generators::share::random(cluster_id, operator_id);
+        let pk_1 = generators::pubkey::random();
+        let share_2 = generators::share::random(cluster_id, operator_id);
+        let pk_2 = generators::pubkey::random();
+
+        // A MultiIndexMap for accessing Shares
+        // Primary Key: validator public key which uniquely identifies a share
+        // Secondary Key: cluster id which does not uniquely identify a share (NonUniqueTag)
+        // Tertiary Key: owner address which does not uniquely identify a share (NonUniqueTag)
+        let map: MultiIndexMap<PublicKey, ClusterId, Address, Share, NonUniqueTag, NonUniqueTag> =
+            MultiIndexMap::new();
+
+        // insert the data
+        map.insert(&pk_1, &cluster_id, &owner, share_1);
+        map.insert(&pk_2, &cluster_id, &owner, share_2);
+
+        // This does not compile since the primary key is always unique
+        // let shares = map.get_all_by(&pk_1);
+
+        // This does compile
+        let share_1 = map.get_by(&pk_1);
+        assert!(share_1.is_some());
+
+        // This does not compile since we enforce NonUnique via NonUniqueTag
+        // let share = map.get_by(&cluster_id);
+
+        // This does compile
+        let shares = map.get_all_by(&cluster_id).expect("Failed to get shares");
+        assert!(shares.len() == 2);
+
+        // Like above, this does not compile
+        // let share = map.get_by(&owner);
+
+        // This does compile
+        let shares = map.get_all_by(&owner).expect("Failed to get shares");
+        assert!(shares.len() == 2);
+    }
+
+    #[test]
+    fn test_unique() {
+        // generate a cluster and its corresponding validator
+        let cluster = generators::cluster::random(4);
+        let validator_metadata = generators::validator::random_metadata(cluster.cluster_id);
+
+        // A MultiIndexMap for accessing a cluster
+        // Primary Key: cluster id that uniquely identifies the cluster
+        // Secondary Key: validator public key that uniquely identifies this cluster
+        // Tertiary Key: owner address that uniquely identifies this cluster
+        let map: MultiIndexMap<ClusterId, PublicKey, Address, Cluster, UniqueTag, UniqueTag> =
+            MultiIndexMap::new();
+
+        // insert the cluster
+        map.insert(
+            &cluster.cluster_id,
+            &validator_metadata.public_key,
+            &cluster.owner,
+            cluster.clone(),
+        );
+
+        // - Fetch via cluster id
+        // This does not compile
+        //let cluster = map.get_all_by(&cluster.cluster_id);
+        // This does compile
+        let c = map.get_by(&cluster.cluster_id);
+        assert!(c.is_some());
+
+        // - Fetch via public key
+        // This does not compile
+        //let cluster = map.get_all_by(&validator_metadata.public_key);
+        // This does compile due to UniqueTag
+        let c = map.get_by(&validator_metadata.public_key);
+        assert!(c.is_some());
+
+        // - Fetch via owner
+        // This does not compile
+        //let cluster = map.get_all_by(&cluster.owner);
+        // This does compile due to UniqueTag
+        let c = map.get_by(&cluster.owner);
+        assert!(c.is_some());
+    }
+}

From 69652e7cb97a0f93e9f36108b0be7b0bbeebb5de Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Thu, 19 Dec 2024 00:47:36 +0000
Subject: [PATCH 30/50] type rework

---
 anchor/common/ssv_types/src/cluster.rs | 34 ++++++++++++++------------
 anchor/common/ssv_types/src/share.rs   |  5 ++++
 2 files changed, 24 insertions(+), 15 deletions(-)

diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs
index 308aee67..318a2ad5 100644
--- a/anchor/common/ssv_types/src/cluster.rs
+++ b/anchor/common/ssv_types/src/cluster.rs
@@ -1,36 +1,42 @@
 use crate::OperatorId;
 use crate::Share;
 use derive_more::{Deref, From};
+use std::collections::HashSet;
+use std::collections::HashMap;
 use types::{Address, Graffiti, PublicKey};
 
 /// Unique identifier for a cluster
 #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)]
 pub struct ClusterId(pub u64);
 
-/// A Cluster is a group of Operators that are acting on behalf of a Validator
+/// A Cluster is a group of Operators that are acting on behalf of one or more Validators
+///
+/// Each cluster is owned by a unique EOA and only that Address may perform operations on the
+/// Cluster.
 #[derive(Debug, Clone)]
 pub struct Cluster {
     /// Unique identifier for a Cluster
     pub cluster_id: ClusterId,
-    /// All of the members of this Cluster
-    pub cluster_members: Vec<ClusterMember>,
+    /// The owner of the cluster and all of the validators
+    pub owner: Address,
+    /// The Eth1 fee address for all validators in the cluster
+    pub fee_recipient: Address,
     /// The number of faulty operators in the Cluster
     pub faulty: u64,
     /// If the Cluster is liquidated or active
     pub liquidated: bool,
-    /// Metadata about the validator this committee represents
-    pub validator_metadata: ValidatorMetadata,
+    /// Operators in this cluster
+    pub cluster_members: HashSet<OperatorId>,
 }
 
-/// A member of a Cluster. This is just an Operator that holds onto a share of the Validator key
+/// A member of a Cluster.
+/// This is an Operator that holds a piece of the keyshare for each validator in the cluster
#[derive(Debug, Clone)]
 pub struct ClusterMember {
     /// Unique identifier for the Operator this member represents
     pub operator_id: OperatorId,
     /// Unique identifier for the Cluster this member is a part of
     pub cluster_id: ClusterId,
-    /// The Share this member is responsible for
-    pub share: Share,
 }
 
 /// Index of the validator in the validator registry.
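A short aside on the technique multi_index.rs above relies on: a zero-sized tag type parameter decides at compile time which accessor traits exist, so calling a unique accessor through a non-unique index is a type error rather than a runtime check. The following is a minimal, self-contained sketch of the same pattern; the `Index` type, the `u64`/`String` keys, and `main` are illustrative assumptions, not code from this patch:

    use std::collections::HashMap;
    use std::hash::Hash;
    use std::marker::PhantomData;

    /// Marker trait and tag, mirroring `Unique`/`UniqueTag` in the patch
    pub trait Unique {}
    pub enum UniqueTag {}
    impl Unique for UniqueTag {}
    pub enum NonUniqueTag {}

    /// A single-index map carrying a compile-time uniqueness tag `U`
    pub struct Index<K, V, U> {
        inner: HashMap<K, V>,
        _tag: PhantomData<U>,
    }

    impl<K: Eq + Hash, V, U> Index<K, V, U> {
        pub fn new() -> Self {
            Self {
                inner: HashMap::new(),
                _tag: PhantomData,
            }
        }
        pub fn insert(&mut self, k: K, v: V) {
            self.inner.insert(k, v);
        }
    }

    // `get_by` only exists when the tag implements `Unique`
    impl<K: Eq + Hash, V: Clone, U: Unique> Index<K, V, U> {
        pub fn get_by(&self, key: &K) -> Option<V> {
            self.inner.get(key).cloned()
        }
    }

    fn main() {
        let mut unique: Index<u64, String, UniqueTag> = Index::new();
        unique.insert(1, "share".to_string());
        assert_eq!(unique.get_by(&1).as_deref(), Some("share"));

        let mut non_unique: Index<u64, String, NonUniqueTag> = Index::new();
        non_unique.insert(1, "share".to_string());
        // non_unique.get_by(&1); // does not compile: `NonUniqueTag: Unique` is not satisfied
    }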
@@ -40,14 +46,12 @@ pub struct ValidatorIndex(pub usize); /// General Metadata about a Validator #[derive(Debug, Clone)] pub struct ValidatorMetadata { - /// Index of the validator - pub validator_index: ValidatorIndex, /// Public key of the validator - pub validator_pubkey: PublicKey, - /// Eth1 fee address - pub fee_recipient: Address, + pub public_key: PublicKey, + /// The cluster that is responsible for this validator + pub cluster_id: ClusterId, + /// Index of the validator + pub index: ValidatorIndex, /// Graffiti pub graffiti: Graffiti, - /// The owner of the validator - pub owner: Address, } diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index 80672602..a7180b54 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -1,8 +1,13 @@ +use crate::{ClusterId, OperatorId}; use types::PublicKey; /// One of N shares of a split validator key. #[derive(Debug, Clone)] pub struct Share { + /// Operator this share belongs to + pub operator_id: OperatorId, + /// Cluster the operator who owns this share belongs to + pub cluster_id: ClusterId, /// The public key of this Share pub share_pubkey: PublicKey, /// The encrypted private key of the share From 487a20094cb0e45c976abe592679a5f3286c6b2e Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Thu, 19 Dec 2024 16:39:51 +0000 Subject: [PATCH 31/50] multi index map integration, type rewrite integration, start on test fix --- Cargo.lock | 15 + Cargo.toml | 1 + anchor/common/ssv_types/src/cluster.rs | 2 - .../common/ssv_types/src/sql_conversions.rs | 144 +++--- anchor/database/Cargo.toml | 1 + anchor/database/src/cluster_operations.rs | 135 ++--- anchor/database/src/lib.rs | 204 +++----- anchor/database/src/multi_index.rs | 75 ++- anchor/database/src/operator_operations.rs | 55 +- anchor/database/src/share_operations.rs | 8 +- anchor/database/src/sql_operations.rs | 130 +++++ anchor/database/src/state.rs | 213 ++++---- anchor/database/src/table_schema.sql | 19 +- anchor/database/src/tests/cluster_tests.rs | 2 + anchor/database/src/tests/mod.rs | 2 + anchor/database/src/tests/operator_tests.rs | 22 +- anchor/database/src/tests/state_tests.rs | 2 + anchor/database/src/tests/utils.rs | 484 +++++++++--------- anchor/database/src/tests/validator_tests.rs | 39 +- anchor/database/src/validator_operations.rs | 70 +-- 20 files changed, 904 insertions(+), 719 deletions(-) create mode 100644 anchor/database/src/sql_operations.rs diff --git a/Cargo.lock b/Cargo.lock index a517bfe4..6f734a0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1636,6 +1636,20 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04d2cd9c18b9f454ed67da600630b021a8a80bf33f8c95896ab33aaf1c26b728" +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" version = "2.6.0" @@ -1667,6 +1681,7 @@ name = "database" version = "0.1.0" dependencies = [ "base64 0.22.1", + "dashmap", "openssl", "parking_lot 0.12.3", "r2d2", diff --git a/Cargo.toml b/Cargo.toml index d8c74799..2f6ccd0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,6 +62,7 @@ tracing-subscriber = { version = "0.3.18", features = ["fmt", "env-filter"] } base64 = "0.22.1" rusqlite = "0.28.0" openssl = "0.10.68" 
+dashmap = "6.1.0"
 
 [profile.maxperf]
 inherits = "release"
diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs
index 318a2ad5..293da6e8 100644
--- a/anchor/common/ssv_types/src/cluster.rs
+++ b/anchor/common/ssv_types/src/cluster.rs
@@ -1,8 +1,6 @@
 use crate::OperatorId;
-use crate::Share;
 use derive_more::{Deref, From};
 use std::collections::HashSet;
-use std::collections::HashMap;
 use types::{Address, Graffiti, PublicKey};
 
 /// Unique identifier for a cluster
diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs
index 96522cf3..bc7292e0 100644
--- a/anchor/common/ssv_types/src/sql_conversions.rs
+++ b/anchor/common/ssv_types/src/sql_conversions.rs
@@ -1,7 +1,6 @@
-use crate::{
-    Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex,
-    ValidatorMetadata,
-};
+use crate::{Cluster, ClusterId, ClusterMember};
+use crate::{OperatorId, Operator};
+use crate::{Share, ValidatorMetadata, ValidatorIndex};
 use base64::prelude::*;
 use openssl::rsa::Rsa;
 use rusqlite::{types::Type, Error as SqlError, Row};
@@ -20,21 +19,20 @@ fn from_sql_error(
 
 // Conversion from SQL row to an Operator
 impl TryFrom<&Row<'_>> for Operator {
-    // Change the error type to rusqlite::Error
-    type Error = SqlError;
-
+    type Error = rusqlite::Error;
     fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        // Get the OperatorId from column 0
         let id: OperatorId = OperatorId(row.get(0)?);
 
-        // For each operation that could fail, we convert its error to a rusqlite::Error
+        // Get the public key from column 1
         let pem_string = row.get::<_, String>(1)?;
         let decoded_pem = BASE64_STANDARD
             .decode(pem_string)
             .map_err(|e| from_sql_error(1, Type::Text, e))?;
-
         let rsa_pubkey =
             Rsa::public_key_from_pem(&decoded_pem).map_err(|e| from_sql_error(1, Type::Text, e))?;
 
+        // Get the owner from column 2
         let owner_str = row.get::<_, String>(2)?;
         let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(2, Type::Text, e))?;
 
@@ -46,80 +44,112 @@ impl TryFrom<&Row<'_>> for Operator {
     }
 }
 
-// Conversion from SQL row into a Share
-impl TryFrom<&Row<'_>> for Share {
+// Conversion from SQL row and cluster members into a Cluster
+impl TryFrom<(&Row<'_>, Vec<ClusterMember>)> for Cluster {
     type Error = rusqlite::Error;
-    fn try_from(row: &Row) -> Result<Self, Self::Error> {
-        // We get the share_pubkey string from column 2
-        let share_pubkey_str = row.get::<_, String>(2)?;
-        // Convert the string to PublicKey, wrapping any parsing errors
-        let share_pubkey = PublicKey::from_str(&share_pubkey_str)
-            .map_err(|e| from_sql_error(2, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
+    fn try_from(
+        (row, cluster_members): (&Row<'_>, Vec<ClusterMember>),
+    ) -> Result<Self, Self::Error> {
+        // Get ClusterId from column 0
+        let cluster_id = ClusterId(row.get(0)?);
 
-        // Get the encrypted private key from column 3
-        let encrypted_private_key: [u8; 256] = row.get(3)?;
+        // Get the owner from column 1
+        let owner_str = row.get::<_, String>(1)?;
+        let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(1, Type::Text, e))?;
 
-        Ok(Share {
-            share_pubkey,
-            encrypted_private_key,
+        // Get the fee_recipient from column 2
+        let fee_recipient_str = row.get::<_, String>(2)?;
+        let fee_recipient =
+            Address::from_str(&fee_recipient_str).map_err(|e| from_sql_error(2, Type::Text, e))?;
+
+        // Get faulty count from column 3
+        let faulty: u64 = row.get(3)?;
+
+        // Get liquidated status from column 4
+        let liquidated: bool = row.get(4)?;
+
+        Ok(Cluster {
+            cluster_id,
+            owner,
+            fee_recipient,
+            faulty,
+            liquidated,
+            cluster_members: cluster_members
+                .into_iter()
+                .map(|member| member.operator_id)
+                .collect(),
         })
     }
 }
 
+impl TryFrom<&Row<'_>> for ClusterMember {
+    type Error = rusqlite::Error;
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        // Get ClusterId from column 0
+        let cluster_id = ClusterId(row.get(0)?);
+
+        // Get OperatorId from column 1
+        let operator_id = OperatorId(row.get(1)?);
+
+        Ok(ClusterMember {
+            operator_id,
+            cluster_id,
+        })
+    }
+}
+
-// Conversion from SQL row and cluster members into a Cluster
-impl TryFrom<(&Row<'_>, Vec<ClusterMember>)> for Cluster {
-    type Error = rusqlite::Error;
-    fn try_from((row, cluster_members): (&Row, Vec<ClusterMember>)) -> Result<Self, Self::Error> {
-        // These are simple numeric/boolean conversions that use rusqlite's built-in error handling
-        let cluster_id: ClusterId = ClusterId(row.get(0)?);
-        let faulty: u64 = row.get(1)?;
-        let liquidated: bool = row.get(2)?;
-        // Convert the row to ValidatorMetadata - this will use the ValidatorMetadata impl
-        // defined below
-        let validator_metadata: ValidatorMetadata = row.try_into()?;
-
-        Ok(Cluster {
-            cluster_id,
-            cluster_members,
-            faulty,
-            liquidated,
-            validator_metadata,
-        })
-    }
-}
-
 // Conversion from SQL row to ValidatorMetadata
+// Intertwined with Share conversion via "GetShareAndValidator"
 impl TryFrom<&Row<'_>> for ValidatorMetadata {
     type Error = SqlError;
     fn try_from(row: &Row) -> Result<Self, Self::Error> {
-        // Get and parse validator_pubkey from column 3
-        let validator_pubkey_str = row.get::<_, String>(3)?;
-        let validator_pubkey = PublicKey::from_str(&validator_pubkey_str)
-            .map_err(|e| from_sql_error(2, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
+        // Get public key from column 0
+        let validator_pubkey_str = row.get::<_, String>(0)?;
+        let public_key = PublicKey::from_str(&validator_pubkey_str)
+            .map_err(|e| from_sql_error(1, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
 
-        // Get the owner from column 7
-        let owner_str = row.get::<_, String>(4)?;
-        let owner = Address::from_str(&owner_str).map_err(|e| from_sql_error(7, Type::Text, e))?;
+        // Get ClusterId from column 1
+        let cluster_id: ClusterId = ClusterId(row.get(1)?);
 
-        // Get and parse fee_recipient from column 4
-        let fee_recipient_str = row.get::<_, String>(4)?;
-        let fee_recipient =
-            Address::from_str(&fee_recipient_str).map_err(|e| from_sql_error(4, Type::Text, e))?;
-
-        // Get the Graffifi from column 5
-        let graffiti = Graffiti(row.get::<_, [u8; GRAFFITI_BYTES_LEN]>(5)?);
+        // Get ValidatorIndex from column 2
+        let index: ValidatorIndex = ValidatorIndex(row.get(2)?);
 
-        // Get validator_index from column 6
-        let validator_index: ValidatorIndex = ValidatorIndex(row.get(6)?);
+        // Get Graffiti from column 3
+        let graffiti = Graffiti(row.get::<_, [u8; GRAFFITI_BYTES_LEN]>(3)?);
 
         Ok(ValidatorMetadata {
-            validator_index,
-            validator_pubkey,
-            fee_recipient,
+            public_key,
+            cluster_id,
+            index,
             graffiti,
-            owner,
+        })
+    }
+}
+
+// Conversion from SQL row into a Share
+// Intertwined with Metadata conversion via "GetShareAndValidator"
+impl TryFrom<&Row<'_>> for Share {
+    type Error = rusqlite::Error;
+    fn try_from(row: &Row) -> Result<Self, Self::Error> {
+        // Get Share PublicKey from column 4
+        let share_pubkey_str = row.get::<_, String>(4)?;
+        let share_pubkey = PublicKey::from_str(&share_pubkey_str)
+            .map_err(|e| from_sql_error(4, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?;
+
+        // Get the encrypted private key from column 5
+        let encrypted_private_key: [u8; 256] = row.get(5)?;
+
+        // Get the OperatorId from column 6 and ClusterId from column 1
+        let operator_id = OperatorId(row.get(6)?);
+        let cluster_id = ClusterId(row.get(1)?);
+
+        Ok(Share {
+            operator_id,
+            cluster_id,
+            share_pubkey,
+            encrypted_private_key,
         })
     }
 }
diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml
index a166d24c..21681ada 100644
--- a/anchor/database/Cargo.toml
+++ b/anchor/database/Cargo.toml
@@ -13,6 +13,7 @@ r2d2_sqlite = "0.21.0"
 rusqlite = { workspace = true }
 ssv_types = { workspace = true }
 types = { workspace = true }
+dashmap = { workspace = true }
 
 [dev-dependencies]
 rand = "0.8.5"
diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs
index 1a066733..4334b4ee 100644
--- a/anchor/database/src/cluster_operations.rs
+++ b/anchor/database/src/cluster_operations.rs
@@ -1,104 +1,109 @@
 use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL};
 use rusqlite::params;
-use ssv_types::{Cluster, ClusterId};
+use ssv_types::{Cluster, ClusterId, Share, ValidatorMetadata};
+use std::sync::atomic::Ordering;
+use types::PublicKey;
 
 /// Implements all cluster related functionality on the database
 impl NetworkDatabase {
     /// Inserts a new validator and its cluster into the database
-    pub fn insert_cluster(&self, cluster: Cluster) -> Result<(), DatabaseError> {
+    pub fn insert_validator(
+        &self,
+        cluster: Cluster,
+        validator: ValidatorMetadata,
+        shares: Vec<Share>,
+    ) -> Result<(), DatabaseError> {
         let mut conn = self.connection()?;
         let tx = conn.transaction()?;
 
         // Insert the top level cluster data and associated validator metadata
         tx.prepare_cached(SQL[&SqlStatement::InsertCluster])?
-            .execute(params![*cluster.cluster_id])?;
+            .execute(params![
+                *cluster.cluster_id,               // cluster id
+                cluster.owner.to_string(),         // owner
+                cluster.fee_recipient.to_string(), // fee recipient
+            ])?;
         tx.prepare_cached(SQL[&SqlStatement::InsertValidator])?
             .execute(params![
-                cluster.validator_metadata.validator_pubkey.to_string(),
-                *cluster.cluster_id,
-                cluster.validator_metadata.owner.to_string(),
-                cluster.validator_metadata.owner.to_string(),
-                *cluster.validator_metadata.validator_index,
+                validator.public_key.to_string(), // validator public key
+                *cluster.cluster_id,              // cluster id
+                *validator.index,                 // validator index
+                validator.graffiti.0.as_slice(),  // graffiti
             ])?;
 
-        // Insert all of the members and their shares
+        // Record our share if one of them belongs to us
+        let mut our_share = None;
+        let own_id = self.state.single_state.id.load(Ordering::Relaxed);
 
-        let mut member_in_cluster = false;
-        let own_id = self.read_state(|state| state.id);
-        cluster.cluster_members.iter().try_for_each(|member| {
-            // check if we are a member in this cluster
-            if let Some(id) = own_id {
-                if id == member.operator_id {
-                    member_in_cluster = true;
-                }
+        shares.iter().try_for_each(|share| {
+            // Check if any of these shares belong to us, meaning we are a member in the cluster
+            if own_id == *share.operator_id {
+                our_share = Some(share);
             }
-            // insert the cluster member and their share
+
+            // insert the cluster member and the share
             tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])?
- .execute(params![*member.cluster_id, *member.operator_id])?; - self.insert_share( - &tx, - &member.share, - member.cluster_id, - member.operator_id, - &cluster.validator_metadata.validator_pubkey, - ) + .execute(params![*share.cluster_id, *share.operator_id])?; + self.insert_share(&tx, share, &validator.public_key) })?; // Commit all operations to the db tx.commit()?; // If we are a member in this cluster, store relevant information - if member_in_cluster { - self.modify_state(|state| { - let cluster_id = cluster.cluster_id; - // Store the cluster_id since we are a part of this cluster - state.clusters.insert(cluster_id); - cluster.cluster_members.iter().for_each(|member| { - // Store all of the operators that are a member of this cluster - state - .cluster_members - .entry(cluster_id) - .or_default() - .insert(member.operator_id); - // Store our share of the key - if member.operator_id == state.id.expect("Guaranteed to be populated") { - state.shares.insert(cluster_id, member.share.clone()); - } - }); - // Store the metadata of the validator for the cluster - state - .validator_metadata - .insert(cluster_id, cluster.validator_metadata); - }); + if let Some(share) = our_share { + let cluster_id = cluster.cluster_id; + + // Record that we are a member of this cluster + self.state.single_state.clusters.insert(cluster_id); + + // Save the keyshare + self.state.multi_state.shares.insert( + &validator.public_key, // The validator this keyshare belongs to + &cluster_id, // The id of the cluster + &cluster.owner, // The owner of the cluster + share.to_owned(), // The keyshare itself + ); + + // Save all cluster related information + self.state.multi_state.clusters.insert( + &cluster_id, // The id of the cluster + &validator.public_key, // The public key of validator added to the cluster + &cluster.owner, // Owner of the cluster + cluster.to_owned(), // The Cluster and all containing information + ); + + // Save the metadata for the validators + self.state.multi_state.validator_metadata.insert( + &validator.public_key, // The public key of the validator + &cluster_id, // The id of the cluster the validator belongs to + &cluster.owner, // The owner of the cluster + validator.to_owned(), // The metadata of the validator + ); } Ok(()) } /// Mark the cluster as liquidated or active - pub fn update_status(&self, id: ClusterId, status: bool) -> Result<(), DatabaseError> { + pub fn update_status(&self, cluster_id: ClusterId, status: bool) -> Result<(), DatabaseError> { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])? - .execute(params![status, *id])?; + .execute(params![ + status, // status of the cluster (liquidated or active) + *cluster_id // Id of the cluster + ])?; Ok(()) } - /// Delete a cluster from the database. This will cascade and delete all corresponding cluster - /// members, shares, and validator metadata - /// This corresponds to a validator being removed or exiting - pub fn delete_cluster(&self, id: ClusterId) -> Result<(), DatabaseError> { + /// Delete a validator from a cluster. This will cascade and remove all corresponding share + /// data for this validator. If this validator is the last one in the cluster, the cluster + /// and all corresponding cluster members will also be removed + pub fn delete_validator(&self, validator_pubkey: &PublicKey) -> Result<(), DatabaseError> { let conn = self.connection()?; - conn.prepare_cached(SQL[&SqlStatement::DeleteCluster])? 
- .execute(params![*id])?; + conn.prepare_cached(SQL[&SqlStatement::DeleteValidator])? + .execute(params![validator_pubkey.to_string()])?; - // If we are a member of this cluster, remove all relevant information - self.modify_state(|state| { - if state.clusters.contains(&id) { - state.clusters.remove(&id); - state.shares.remove(&id); - state.validator_metadata.remove(&id); - state.cluster_members.remove(&id); - } - }); + // todo!() remove all the relevant information from in memory stores Ok(()) } } diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index f5169f60..44300376 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -1,19 +1,25 @@ -pub use crate::error::DatabaseError; +use dashmap::{DashMap, DashSet}; use openssl::{pkey::Public, rsa::Rsa}; -use parking_lot::RwLock; use r2d2_sqlite::SqliteConnectionManager; use rusqlite::params; -use ssv_types::{ClusterId, Operator, OperatorId, Share, ValidatorMetadata}; -use std::collections::{HashMap, HashSet}; +use ssv_types::{Cluster, ClusterId, Operator, OperatorId, Share, ValidatorMetadata}; use std::fs::File; use std::path::Path; -use std::sync::LazyLock; +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; use std::time::Duration; +use types::{Address, PublicKey}; + +pub use crate::error::DatabaseError; +use crate::multi_index::{MultiIndexMap, *}; +use crate::sql_operations::{SqlStatement, SQL}; mod cluster_operations; mod error; +mod multi_index; mod operator_operations; mod share_operations; +mod sql_operations; mod state; mod validator_operations; @@ -25,24 +31,53 @@ type PoolConn = r2d2::PooledConnection; const POOL_SIZE: u32 = 1; const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); -#[derive(Debug, Clone, Default)] -struct NetworkState { +type ShareMultiIndexMap = + MultiIndexMap; +type MetadataMultiIndexMap = + MultiIndexMap; +type ClusterMultiIndexMap = + MultiIndexMap; + +// Information that needs to be accesses via multiple different indicies +#[derive(Debug)] +pub struct MultiState { + /// All of the shares that belong to use + /// Primary: public key of validator. uniquely identifies share + /// Secondary: cluster id. corresponds to a list of shares + /// Tertiary: owner of the cluster. corresponds to a list of shares + pub shares: ShareMultiIndexMap, + /// Metadata for validators that delegate to us + /// Primary: public key of the validator. uniquely identifies the metadata + /// Secondary: cluster id. corresponds to list of metadata for all validators + /// Tertiary: owner of the cluster: corresponds to list of metadata for all validators + pub validator_metadata: MetadataMultiIndexMap, + /// All cluster data for each cluster we are a member in + /// Primary: cluster id. uniquely identifies a cluster + /// Secondary: public key of the validator. uniquely identifies a cluster + /// Tertiary: owner of the cluster. uniquely identifies a cluster + pub clusters: ClusterMultiIndexMap, +} + +// General information that can be single index access +#[derive(Debug, Default)] +pub struct SingleState { /// The ID of our own operator. This is determined via events when the operator is /// registered with the network. Therefore, this may not be available right away if the client - /// is running but has not bee registered with the network contract yet. - id: Option, + /// is running but has not been registered with the network contract yet. 
+    id: AtomicU64,
+    /// The last block that was processed
+    last_processed_block: AtomicU64,
     /// All of the operators in the network
-    operators: HashMap<OperatorId, Operator>,
+    operators: DashMap<OperatorId, Operator>,
     /// All of the Clusters that we are a member of
-    clusters: HashSet<ClusterId>,
-    /// All of the shares that we are responsible for/own
-    shares: HashMap<ClusterId, Share>,
-    /// ValidatorMetadata for clusters we are a member in
-    validator_metadata: HashMap<ClusterId, ValidatorMetadata>,
-    /// Full set of members for a cluster we are in
-    cluster_members: HashMap<ClusterId, HashSet<OperatorId>>,
-    /// The last block that was processed
-    last_processed_block: u64,
+    clusters: DashSet<ClusterId>,
+}
+
+// Container to hold all network state
+#[derive(Debug)]
+pub struct NetworkState {
+    pub multi_state: MultiState,
+    single_state: SingleState,
 }

 /// Top level NetworkDatabase that contains in memory storage for quick access
@@ -52,7 +87,7 @@ pub struct NetworkDatabase {
     /// The public key of our operator
     pubkey: Rsa<Public>,
     /// Custom state stores for easy data access
-    state: RwLock<NetworkState>,
+    state: NetworkState,
     /// Connection to the database
     conn_pool: Pool,
 }

@@ -61,7 +96,7 @@ impl NetworkDatabase {
     /// Construct a new NetworkDatabase at the given path and the Public Key of our operator.
     pub fn new(path: &Path, pubkey: &Rsa<Public>) -> Result<Self, DatabaseError> {
         let conn_pool = Self::open_or_create(path)?;
-        let state = RwLock::new(NetworkState::new_with_state(&conn_pool, pubkey)?);
+        let state = NetworkState::new_with_state(&conn_pool, pubkey)?;
         Ok(Self {
             pubkey: pubkey.clone(),
             state,
@@ -69,28 +104,15 @@ impl NetworkDatabase {
         })
     }

-    pub(crate) fn read_state<F, R>(&self, f: F) -> R
-    where
-        F: FnOnce(&NetworkState) -> R,
-    {
-        let state = self.state.read();
-        f(&state)
-    }
-
-    fn modify_state<F, R>(&self, f: F) -> R
-    where
-        F: FnOnce(&mut NetworkState) -> R,
-    {
-        let mut state = self.state.write();
-        f(&mut state)
-    }
-
     /// Update the last processed block number in the database
     pub fn processed_block(&self, block_number: u64) -> Result<(), DatabaseError> {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::UpdateBlockNumber])?
             .execute(params![block_number])?;
-        self.modify_state(|state| state.last_processed_block = block_number);
+        self.state
+            .single_state
+            .last_processed_block
+            .store(block_number, Ordering::Relaxed);
         Ok(())
     }

@@ -136,109 +158,3 @@ impl NetworkDatabase {
         Ok(self.conn_pool.get()?)
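// A minimal sketch of the sentinel convention behind `id: AtomicU64` in
// SingleState: u64::MAX stands in for "not yet registered", letting the
// atomic replace Option<OperatorId> without a lock (get_own_id in state.rs
// reads it the same way):
//
//     fn own_id(single: &SingleState) -> Option<OperatorId> {
//         match single.id.load(Ordering::Relaxed) {
//             u64::MAX => None,
//             id => Some(OperatorId(id)),
//         }
//     }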
} } - -// Wrappers around various SQL statements used for interacting with the db -#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] -pub(crate) enum SqlStatement { - InsertOperator, - DeleteOperator, - GetOperatorId, - GetAllOperators, - - InsertCluster, - InsertClusterMember, - UpdateClusterStatus, - UpdateClusterFaulty, - DeleteCluster, - GetAllClusters, - GetClusterMembers, - - InsertShare, - InsertValidator, - UpdateFeeRecipient, - SetGraffiti, - SetValidatorIndex, - - UpdateBlockNumber, - GetBlockNumber, -} - -pub(crate) static SQL: LazyLock> = LazyLock::new(|| { - let mut m = HashMap::new(); - m.insert( - SqlStatement::InsertOperator, - "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", - ); - m.insert( - SqlStatement::DeleteOperator, - "DELETE FROM operators WHERE operator_id = ?1", - ); - m.insert( - SqlStatement::GetOperatorId, - "SELECT operator_id FROM operators WHERE public_key = ?1", - ); - m.insert(SqlStatement::GetAllOperators, "SELECT * FROM operators"); - m.insert( - SqlStatement::InsertCluster, - "INSERT INTO clusters (cluster_id) VALUES (?1)", - ); - m.insert( - SqlStatement::UpdateClusterStatus, - "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", - ); - m.insert( - SqlStatement::UpdateClusterFaulty, - "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", - ); - m.insert( - SqlStatement::InsertClusterMember, - "INSERT INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", - ); - m.insert( - SqlStatement::DeleteCluster, - "DELETE FROM clusters WHERE cluster_id = ?1", - ); - m.insert( - SqlStatement::GetAllClusters, - "SELECT c.cluster_id, c.faulty, c.liquidated, - v.validator_pubkey, v.fee_recipient, v.graffiti, v.validator_index, v.owner - FROM clusters c - JOIN cluster_members cm ON c.cluster_id = cm.cluster_id - JOIN validators v ON c.cluster_id = v.cluster_id - WHERE cm.operator_id = ?", - ); - m.insert( - SqlStatement::GetClusterMembers, - "SELECT cm.cluster_id, cm.operator_id, s.share_pubkey, s.encrypted_key - FROM cluster_members cm - JOIN shares s ON cm.cluster_id = s.cluster_id AND cm.operator_id = s.operator_id - WHERE cm.cluster_id = ?", - ); - m.insert(SqlStatement::InsertShare, - "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) VALUES (?1, ?2, ?3, ?4, ?5)"); - m.insert( - SqlStatement::InsertValidator, - "INSERT INTO validators (validator_pubkey, cluster_id, fee_recipient, owner, validator_index) VALUES (?1, ?2, ?3, ?4, ?5)", - ); - m.insert( - SqlStatement::UpdateFeeRecipient, - "UPDATE validators SET fee_recipient = ?1 WHERE validator_pubkey = ?2", - ); - m.insert( - SqlStatement::SetGraffiti, - "UPDATE validators SET graffiti = ?1 WHERE validator_pubkey = ?2", - ); - m.insert( - SqlStatement::SetValidatorIndex, - "UPDATE validators SET validator_index = ?1 WHERE validator_pubkey = ?2", - ); - m.insert( - SqlStatement::UpdateBlockNumber, - "UPDATE block SET block_number = ?1", - ); - m.insert( - SqlStatement::GetBlockNumber, - "SELECT block_number FROM block", - ); - m -}); diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs index 731d2933..ae9e5a11 100644 --- a/anchor/database/src/multi_index.rs +++ b/anchor/database/src/multi_index.rs @@ -1,5 +1,6 @@ use dashmap::DashMap; -use std::{hash::Hash, marker::PhantomData}; +use std::hash::Hash; +use std::marker::PhantomData; /// Marker trait for uniquely identifying indicies pub trait Unique {} @@ -31,7 +32,7 @@ pub trait NonUniqueIndex { fn get_all_by(&self, key: &K) -> 
Option>; } -#[derive(Debug, Default)] +#[derive(Debug)] struct InnerMaps where K1: Eq + Hash, @@ -60,7 +61,7 @@ where /// - V: Value type /// - U1: Secondary index uniqueness (Unique or NotUnique) /// - U2: Tertiary index uniqueness (Unique or NotUnique) -#[derive(Debug, Default)] +#[derive(Debug)] pub struct MultiIndexMap where K1: Eq + Hash, @@ -71,6 +72,29 @@ where _marker: PhantomData<(U1, U2)>, } +impl Default for MultiIndexMap +where + K1: Eq + Hash + Clone, + K2: Eq + Hash + Clone, + K3: Eq + Hash + Clone, + V: Clone, + U1: 'static, + U2: 'static, +{ + fn default() -> Self { + Self { + maps: InnerMaps { + primary: DashMap::new(), + secondary_unique: DashMap::new(), + secondary_multi: DashMap::new(), + tertiary_unique: DashMap::new(), + tertiary_multi: DashMap::new(), + }, + _marker: PhantomData, + } + } +} + impl MultiIndexMap where K1: Eq + Hash + Clone, @@ -121,6 +145,49 @@ where .or_insert_with(|| vec![k1.clone()]); } } + + /// Remove a value and all its indexes using the primary key + pub fn remove(&self, k1: &K1) -> Option { + // Remove from primary storage + let removed = self.maps.primary.remove(k1)?; + + // Remove from secondary index + if std::any::TypeId::of::() == std::any::TypeId::of::() { + // For unique indexes, just remove the entry that points to this k1 + self.maps.secondary_unique.retain(|_, v| v != k1); + } else { + // For non-unique indexes, remove k1 from any vectors it appears in + self.maps.secondary_multi.retain(|_, v| { + v.retain(|x| x != k1); + !v.is_empty() + }); + } + + // Remove from tertiary index + if std::any::TypeId::of::() == std::any::TypeId::of::() { + // For unique indexes, just remove the entry that points to this k1 + self.maps.tertiary_unique.retain(|_, v| v != k1); + } else { + // For non-unique indexes, remove k1 from any vectors it appears in + self.maps.tertiary_multi.retain(|_, v| { + v.retain(|x| x != k1); + !v.is_empty() + }); + } + + Some(removed.1) + } + + /// Update an existing value using the primary key + /// Only updates if the primary key exists, indexes remain unchanged + pub fn update(&self, k1: &K1, new_value: V) -> Option { + if !self.maps.primary.contains_key(k1) { + return None; + } + + // Only update the value in primary storage + self.maps.primary.insert(k1.clone(), new_value) + } } // Implement unique access for primary key @@ -241,7 +308,7 @@ mod tests { map.insert(&pk_1, &cluster_id, &owner, share_1); map.insert(&pk_2, &cluster_id, &owner, share_2); - // This does not compile since + // This does not compile // let shares = map.get_all_by(&pk_1); // This does compile diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs index ebbaf054..cf2ac09c 100644 --- a/anchor/database/src/operator_operations.rs +++ b/anchor/database/src/operator_operations.rs @@ -1,8 +1,8 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use base64::prelude::*; - use rusqlite::params; use ssv_types::{Operator, OperatorId}; +use std::sync::atomic::Ordering; /// Implements all operator related functionality on the database impl NetworkDatabase { @@ -16,34 +16,39 @@ impl NetworkDatabase { ))); } - let encoded = BASE64_STANDARD.encode( - operator - .rsa_pubkey - .public_key_to_pem() - .expect("Failed to encode RsaPublicKey"), - ); + // base64 encode the key for storage + let pem_key = operator + .rsa_pubkey + .public_key_to_pem() + .expect("Failed to encode RsaPublicKey"); + let encoded = BASE64_STANDARD.encode(pem_key.clone()); - // Insert into the database, then store in memory + // 
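// A short usage sketch of the MultiIndexMap API above (bindings such as
// `pubkey`, `owner`, `share`, and `new_share` are invented for illustration):
// get_by serves the unique indexes, get_all_by the non-unique ones, and
// remove() and update() always key off the primary index:
//
//     let map: ShareMultiIndexMap = MultiIndexMap::default();
//     map.insert(&pubkey, &cluster_id, &owner, share);
//     let mine = map.get_by(&pubkey);                // primary, unique
//     let in_cluster = map.get_all_by(&cluster_id);  // secondary, non-unique
//     map.update(&pubkey, new_share);                // value only; indexes stay
//     map.remove(&pubkey);                           // clears all three indexes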
Insert into the database let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::InsertOperator])? - .execute(params![*operator.id, encoded, operator.owner.to_string()])?; + .execute(params![ + *operator.id, // the id of the registered operator + encoded, // RSA public key + operator.owner.to_string() // the owner address of the operator + ])?; - // Check to see if this operator is us and insert it into db - //self.state.operators.insert(operator.id, operator.clone()); - self.modify_state(|state| { - if state.id.is_none() { - let keys_match = operator - .rsa_pubkey - .public_key_to_pem() - .and_then(|key1| self.pubkey.public_key_to_pem().map(|key2| key1 == key2)) - .unwrap_or(false); - if keys_match { - state.id = Some(operator.id); - } + // Check to see if this operator is us and insert it into memory + let own_id = self.state.single_state.id.load(Ordering::Relaxed); + if own_id == u64::MAX { + // if the keys match, this is us so we want to save the id + let keys_match = pem_key == self.pubkey.public_key_to_pem().unwrap_or_default(); + if keys_match { + self.state + .single_state + .id + .store(*operator.id, Ordering::Relaxed); } - - state.operators.insert(operator.id, operator.clone()); - }); + } + // store the operator + self.state + .single_state + .operators + .insert(operator.id, operator.to_owned()); Ok(()) } @@ -64,7 +69,7 @@ impl NetworkDatabase { .execute(params![*id])?; // Remove the operator - self.modify_state(|state| state.operators.remove(&id)); + self.state.single_state.operators.remove(&id); Ok(()) } } diff --git a/anchor/database/src/share_operations.rs b/anchor/database/src/share_operations.rs index a1125d18..d7a48ddf 100644 --- a/anchor/database/src/share_operations.rs +++ b/anchor/database/src/share_operations.rs @@ -1,6 +1,6 @@ use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; use rusqlite::{params, Transaction}; -use ssv_types::{ClusterId, OperatorId, Share}; +use ssv_types::Share; use types::PublicKey; /// Implements all Share related functionality on the database @@ -9,15 +9,13 @@ impl NetworkDatabase { &self, tx: &Transaction<'_>, share: &Share, - cluster_id: ClusterId, - operator_id: OperatorId, validator_pubkey: &PublicKey, ) -> Result<(), DatabaseError> { tx.prepare_cached(SQL[&SqlStatement::InsertShare])? 
.execute(params![ validator_pubkey.to_string(), - *cluster_id, - *operator_id, + *share.cluster_id, + *share.operator_id, share.share_pubkey.to_string(), share.encrypted_private_key ])?; diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs new file mode 100644 index 00000000..1257d6af --- /dev/null +++ b/anchor/database/src/sql_operations.rs @@ -0,0 +1,130 @@ +use std::collections::HashMap; +use std::sync::LazyLock; + +// Wrappers around various SQL statements used for interacting with the db +#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] +pub(crate) enum SqlStatement { + InsertOperator, + DeleteOperator, + GetOperatorId, + GetAllOperators, + + InsertCluster, + InsertClusterMember, + UpdateClusterStatus, + UpdateClusterFaulty, + DeleteCluster, + GetAllClusters, + GetClusterMembers, + + DeleteValidator, + InsertShare, + InsertValidator, + UpdateFeeRecipient, + SetGraffiti, + SetValidatorIndex, + + UpdateBlockNumber, + GetBlockNumber, + + GetShareAndValidator, +} + +pub(crate) static SQL: LazyLock> = LazyLock::new(|| { + let mut m = HashMap::new(); + m.insert( + SqlStatement::InsertOperator, + "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::DeleteOperator, + "DELETE FROM operators WHERE operator_id = ?1", + ); + m.insert( + SqlStatement::GetOperatorId, + "SELECT operator_id FROM operators WHERE public_key = ?1", + ); + m.insert(SqlStatement::GetAllOperators, "SELECT * FROM operators"); + m.insert( + SqlStatement::InsertCluster, + "INSERT OR IGNORE INTO clusters (cluster_id, owner, fee_recipient) VALUES (?1, ?2, ?3)", + ); + m.insert( + SqlStatement::UpdateClusterStatus, + "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::UpdateClusterFaulty, + "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", + ); + m.insert( + SqlStatement::InsertClusterMember, + "INSERT OR IGNORE INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", + ); + m.insert( + SqlStatement::DeleteCluster, + "DELETE FROM clusters WHERE cluster_id = ?1", + ); + + m.insert( + SqlStatement::DeleteValidator, + "DELETE from validators WHERE validator_pubkey = ?1", + ); + m.insert( + SqlStatement::GetAllClusters, + "SELECT DISTINCT + c.cluster_id, + c.owner, + c.fee_recipient, + c.faulty, + c.liquidated + FROM clusters c + JOIN cluster_members cm ON c.cluster_id = cm.cluster_id + WHERE cm.operator_id = ?", + ); + m.insert( + SqlStatement::GetClusterMembers, + "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1", + ); + m.insert(SqlStatement::InsertShare, + "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) VALUES (?1, ?2, ?3, ?4, ?5)"); + m.insert( + SqlStatement::InsertValidator, + "INSERT INTO validators (validator_pubkey, cluster_id, validator_index, graffiti) VALUES (?1, ?2, ?3, ?4)", + ); + m.insert( + SqlStatement::UpdateFeeRecipient, + "UPDATE clusters SET fee_recipient = ?1 WHERE owner = ?2", + ); + m.insert( + SqlStatement::SetGraffiti, + "UPDATE validators SET graffiti = ?1 WHERE validator_pubkey = ?2", + ); + m.insert( + SqlStatement::SetValidatorIndex, + "UPDATE validators SET validator_index = ?1 WHERE validator_pubkey = ?2", + ); + m.insert( + SqlStatement::UpdateBlockNumber, + "UPDATE block SET block_number = ?1", + ); + m.insert( + SqlStatement::GetBlockNumber, + "SELECT block_number FROM block", + ); + m.insert( + SqlStatement::GetShareAndValidator, + "SELECT + v.validator_pubkey, + v.cluster_id, 
+ v.validator_index, + v.graffiti, + s.share_pubkey, + s.encrypted_key, + s.operator_id + FROM validators v + JOIN shares s ON v.validator_pubkey = s.validator_pubkey + WHERE s.operator_id = ?1", + ); + m +}); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index b8c847fd..d258eb9e 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -1,14 +1,17 @@ -use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn, SqlStatement, SQL}; +use crate::{ClusterMultiIndexMap, MetadataMultiIndexMap, MultiIndexMap, ShareMultiIndexMap}; +use crate::{DatabaseError, NetworkDatabase, NetworkState, Pool, PoolConn}; +use crate::{MultiState, SingleState}; +use crate::{SqlStatement, SQL}; use base64::prelude::*; +use dashmap::{DashMap, DashSet}; use openssl::pkey::Public; use openssl::rsa::Rsa; use rusqlite::{params, OptionalExtension}; use ssv_types::{ - Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorIndex, - ValidatorMetadata, + Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorMetadata, }; -use std::collections::{HashMap, HashSet}; -use types::Address; +use std::collections::HashMap; +use std::sync::atomic::{AtomicU64, Ordering}; impl NetworkState { /// Build the network state from the database data @@ -28,59 +31,72 @@ impl NetworkState { let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { operator_id } else { - // If it does not exist, just default the state + // If it does not exist, just default the state since we do not know who we are return Ok(Self { - last_processed_block, - ..Default::default() + multi_state: MultiState { + shares: MultiIndexMap::default(), + validator_metadata: MultiIndexMap::default(), + clusters: MultiIndexMap::default(), + }, + single_state: SingleState::default(), }); }; // First Phase: Fetch data from the database - // Get all of the operators from the network + // The two main data structures are a map of ClusterId -> Cluster and ClusterID -> + // Vec<(Share, ValidatorMetadata)>. This greatly simplifies data handling and makes it very + // easy to add more customized stores in the future. 
The operators are simply fetched as-is.
         let operators = Self::fetch_operators(&conn)?;
-        // Get clusters that this operator (id) participates in
+        let share_validator = Self::fetch_shares_and_validators(&conn, id)?;
         let clusters = Self::fetch_clusters(&conn, id)?;

-        // Second phase: Transform data into efficient state stores
-        // Pre-allocate HashMaps with known capacity
-        let num_clusters = clusters.len();
-        let mut shares: HashMap<ClusterId, Share> = HashMap::with_capacity(num_clusters);
-        let mut validator_metadata: HashMap<ClusterId, ValidatorMetadata> =
-            HashMap::with_capacity(num_clusters);
-        let mut cluster_members: HashMap<ClusterId, HashSet<OperatorId>> =
-            HashMap::with_capacity(num_clusters);
-
-        // Populate state stores from cluster data
-        clusters.iter().for_each(|cluster| {
-            let cluster_id = cluster.cluster_id;
-
-            // Store validator metadata for each cluster
-            validator_metadata.insert(cluster_id, cluster.validator_metadata.to_owned());
-
-            // Process each member in the cluster
-            for member in cluster.cluster_members.clone().into_iter() {
-                // Track cluster membership
-                cluster_members
-                    .entry(cluster_id)
-                    .or_default()
-                    .insert(member.operator_id);
-
-                // If this member is us, store our share
-                if member.operator_id == id {
-                    shares.insert(cluster_id, member.share);
-                }
-            }
+        // Second phase: Populate all in memory stores with data.
+        let shares_multi: ShareMultiIndexMap = MultiIndexMap::new();
+        let metadata_multi: MetadataMultiIndexMap = MultiIndexMap::new();
+        let cluster_multi: ClusterMultiIndexMap = MultiIndexMap::new();
+        let single_state = SingleState {
+            id: AtomicU64::new(*id),
+            last_processed_block: AtomicU64::new(last_processed_block),
+            operators: DashMap::from_iter(operators),
+            clusters: DashSet::from_iter(clusters.keys().copied()),
+        };
+
+        // Insert all of the cluster information
+        clusters.iter().for_each(|(cluster_id, cluster)| {
+            let validator_key = share_validator
+                .get(cluster_id)
+                .expect("Validator should exist")
+                .1
+                .public_key
+                .clone();
+            cluster_multi.insert(cluster_id, &validator_key, &cluster.owner, cluster.clone());
         });

+        // Insert all of the share and validator_metadata
+        share_validator
+            .into_iter()
+            .for_each(|(cluster_id, (share, metadata))| {
+                let cluster_owner = clusters
+                    .get(&cluster_id)
+                    .expect("Cluster should exist")
+                    .owner;
+                shares_multi.insert(&metadata.public_key, &cluster_id, &cluster_owner, share);
+                metadata_multi.insert(
+                    &metadata.public_key,
+                    &cluster_id,
+                    &cluster_owner,
+                    metadata.to_owned(),
+                );
+            });
+
         // Return fully constructed state
         Ok(Self {
-            id: Some(id),
-            operators,
-            clusters: clusters.iter().map(|c| c.cluster_id).collect(),
-            shares,
-            validator_metadata,
-            cluster_members,
-            last_processed_block,
+            multi_state: MultiState {
+                shares: shares_multi,
+                validator_metadata: metadata_multi,
+                clusters: cluster_multi,
+            },
+            single_state,
         })
     }

@@ -120,22 +136,42 @@ impl NetworkState {
         operators.collect()
     }

+    // Fetch all of the validators and their associated share. Fetched together so that we can
+    // guarantee that they pair up correctly
+    fn fetch_shares_and_validators(
+        conn: &PoolConn,
+        operator_id: OperatorId,
+    ) -> Result<HashMap<ClusterId, (Share, ValidatorMetadata)>, DatabaseError> {
+        let mut stmt = conn.prepare(SQL[&SqlStatement::GetShareAndValidator])?;
+        let data = stmt
+            .query_map([*operator_id], |row| {
+                let metadata = ValidatorMetadata::try_from(row)?;
+                let share = Share::try_from(row)?;
+                Ok((metadata.cluster_id, (share, metadata)))
+            })?
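// The closure above relies on TryFrom<&rusqlite::Row> impls for Share and
// ValidatorMetadata (see ssv_types::sql_conversions in this series), so each
// joined row yields one correctly paired tuple; sketched shape:
//
//     // per row:    (ClusterId, (Share, ValidatorMetadata))
//     // collected:  HashMap<ClusterId, (Share, ValidatorMetadata)>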
+            .map(|result| result.map_err(DatabaseError::from));
+        data.collect::<Result<HashMap<_, _>, _>>()
+    }
+
     // Fetch and transform cluster data for a specific operator
     fn fetch_clusters(
         conn: &PoolConn,
         operator_id: OperatorId,
-    ) -> Result<Vec<Cluster>, DatabaseError> {
+    ) -> Result<HashMap<ClusterId, Cluster>, DatabaseError> {
         let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllClusters])?;
-        let cluster = stmt
-            .query_map([operator_id.0], |row| {
+        let clusters = stmt
+            .query_map([*operator_id], |row| {
                 let cluster_id = ClusterId(row.get(0)?);

-                // Get all of the cluster members, and then construct the cluster
+                // Get all of the members for this cluster
                 let cluster_members = Self::fetch_cluster_members(conn, cluster_id)?;
-                Cluster::try_from((row, cluster_members))
+
+                // Convert row and members into cluster
+                let cluster = Cluster::try_from((row, cluster_members))?;
+                Ok((cluster_id, cluster))
             })?
             .map(|result| result.map_err(DatabaseError::from));
-        cluster.collect()
+        clusters.collect::<Result<HashMap<_, _>, _>>()
     }

     // Fetch members of a specific cluster
@@ -144,78 +180,49 @@ impl NetworkState {
         cluster_id: ClusterId,
     ) -> Result<Vec<ClusterMember>, rusqlite::Error> {
         let mut stmt = conn.prepare(SQL[&SqlStatement::GetClusterMembers])?;
-        let cluster_members = stmt.query_map([cluster_id.0], |row| {
-            // Fetch all of the cluster members for the given ClusterId
-            let share = row.try_into()?;
+        let members = stmt.query_map([cluster_id.0], |row| {
             Ok(ClusterMember {
-                operator_id: OperatorId(row.get(1)?),
+                operator_id: OperatorId(row.get(0)?),
                 cluster_id,
-                share,
             })
         })?;
-        cluster_members.collect()
+
+        members.collect()
     }
 }

-// Clean interface for accessing network state
+// Clean interface for accessing single state data
 impl NetworkDatabase {
     /// Get operator data from in-memory store
     pub fn get_operator(&self, id: &OperatorId) -> Option<Operator> {
-        self.read_state(|state| state.operators.get(id).cloned())
+        self.state.single_state.operators.get(id).map(|v| v.clone())
     }

-    /// Check if an operator exists
-    pub fn operator_exists(&self, id: &OperatorId) -> bool {
-        self.read_state(|state| state.operators.contains_key(id))
+    /// Get the ID of our Operator if it exists
+    pub fn get_own_id(&self) -> Option<OperatorId> {
+        let id = self.state.single_state.id.load(Ordering::Relaxed);
+        if id == u64::MAX {
+            None
+        } else {
+            Some(OperatorId(id))
+        }
     }

-    /// Check if a cluster exists
-    pub fn cluster_exists(&self, id: &ClusterId) -> bool {
-        self.read_state(|state| state.clusters.contains(id))
+    /// Check if an operator exists
+    pub fn operator_exists(&self, id: &OperatorId) -> bool {
+        self.state.single_state.operators.contains_key(id)
     }

     /// Check if we are a member of a specific cluster
     pub fn member_of_cluster(&self, id: &ClusterId) -> bool {
-        self.read_state(|state| state.clusters.contains(id))
-    }
-
-    /// Get own share of key for a Cluster we are a member in
-    pub fn get_share(&self, id: &ClusterId) -> Option<Share> {
-        self.read_state(|state| state.shares.get(id).cloned())
-    }
-
-    /// Set the id of our own operator
-    pub fn set_own_id(&self, id: OperatorId) {
-        self.modify_state(|state| state.id = Some(id))
-    }
-
-    /// Get the metatdata for the cluster
-    pub fn get_validator_metadata(&self, id: &ClusterId) -> Option<ValidatorMetadata> {
-        self.read_state(|state| state.validator_metadata.get(id).cloned())
+        self.state.single_state.clusters.contains(id)
     }

     /// Get the last block that has been fully processed by the database
     pub fn get_last_processed_block(&self) -> u64 {
-        self.read_state(|state| state.last_processed_block)
-    }
-
-    /// Get the Fee Recipient address
-    pub fn get_fee_recipient(&self, id: &ClusterId) -> Option<Address>
{ - self.read_state(|state| { - state - .validator_metadata - .get(id) - .map(|metadata| metadata.fee_recipient) - }) - } - - /// Get the Validator Index - pub fn get_validator_index(&self, id: &ClusterId) -> Option { - self.read_state(|state| { - state - .validator_metadata - .get(id) - .map(|metadata| metadata.validator_index) - }) + self.state + .single_state + .last_processed_block + .load(Ordering::Relaxed) } } diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 589943e2..1c37d9b7 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -12,6 +12,8 @@ CREATE TABLE operators ( CREATE TABLE clusters ( cluster_id INTEGER PRIMARY KEY, + owner TEXT NOT NULL, + fee_recipient TEXT NOT NULL, faulty INTEGER DEFAULT 0, liquidated BOOLEAN DEFAULT FALSE ); @@ -27,11 +29,9 @@ CREATE TABLE cluster_members ( CREATE TABLE validators ( validator_pubkey TEXT PRIMARY KEY, cluster_id INTEGER NOT NULL, - fee_recipient TEXT NOT NULL, - owner TEXT, - graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', validator_index INTEGER DEFAULT 0, - FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE + graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', + FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ); CREATE TABLE shares ( @@ -45,3 +45,14 @@ CREATE TABLE shares ( FOREIGN KEY (validator_pubkey) REFERENCES validators(validator_pubkey) ON DELETE CASCADE ); +-- Add trigger to clean up empty clusters +CREATE TRIGGER delete_empty_clusters +AFTER DELETE ON validators +WHEN NOT EXISTS ( + SELECT 1 FROM validators + WHERE cluster_id = OLD.cluster_id +) +BEGIN + DELETE FROM clusters WHERE cluster_id = OLD.cluster_id; +END; + diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index a78fa192..b783140f 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -3,6 +3,7 @@ use super::test_prelude::*; #[cfg(test)] mod cluster_database_tests { use super::*; + /* #[test] // Test inserting a cluster into the database @@ -98,4 +99,5 @@ mod cluster_database_tests { .insert_cluster(fixture.cluster) .expect_err("Expected failure when inserting cluster that already exists"); } + */ } diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index 262bfa39..aa127188 100644 --- a/anchor/database/src/tests/mod.rs +++ b/anchor/database/src/tests/mod.rs @@ -10,6 +10,8 @@ pub mod test_prelude { pub use ssv_types::*; pub use tempfile::tempdir; pub use types::{Address, Graffiti, PublicKey}; + pub use crate::multi_index::{UniqueIndex, NonUniqueIndex}; + pub use crate::sql_operations::{SqlStatement, SQL}; } #[cfg(test)] diff --git a/anchor/database/src/tests/operator_tests.rs b/anchor/database/src/tests/operator_tests.rs index c0f759a1..68179674 100644 --- a/anchor/database/src/tests/operator_tests.rs +++ b/anchor/database/src/tests/operator_tests.rs @@ -19,8 +19,8 @@ mod operator_database_tests { .expect("Failed to insert operator"); // Confirm that it exists both in the db and the state store - assertions::assert_operator_exists_in_db(&fixture.db, &operator); - assertions::assert_operator_exists_in_store(&fixture.db, &operator); + assertions::operator::exists_in_db(&fixture.db, &operator); + assertions::operator::exists_in_memory(&fixture.db, &operator); } #[test] @@ -37,10 +37,7 @@ mod operator_database_tests { .expect("Failed to 
insert operator"); // Try to insert it again, this should fail - let success = fixture.db.insert_operator(&operator); - if success.is_ok() { - panic!("Expected an error when inserting an operator that is already present"); - } + assert!(fixture.db.insert_operator(&operator).is_err()); } #[test] @@ -63,8 +60,8 @@ mod operator_database_tests { .expect("Failed to delete operator"); // Confirm that it is gone - assertions::assert_operator_not_exists_in_db(&fixture.db, operator.id); - assertions::assert_operator_not_exists_in_store(&fixture.db, operator.id); + assertions::operator::exists_not_in_memory(&fixture.db, operator.id); + assertions::operator::exists_not_in_db(&fixture.db, operator.id); } #[test] @@ -88,8 +85,8 @@ mod operator_database_tests { .db .delete_operator(operator.id) .expect("Failed to delete operator"); - assertions::assert_operator_not_exists_in_db(&fixture.db, operator.id); - assertions::assert_operator_not_exists_in_store(&fixture.db, operator.id); + assertions::operator::exists_not_in_memory(&fixture.db, operator.id); + assertions::operator::exists_not_in_db(&fixture.db, operator.id); } } @@ -97,9 +94,6 @@ mod operator_database_tests { /// Try to delete an operator that does not exist fn test_delete_dne_operator() { let fixture = TestFixture::new_empty(); - fixture - .db - .delete_operator(OperatorId(1)) - .expect_err("Deletion should fail. Operator DNE"); + assert!(fixture.db.delete_operator(OperatorId(1)).is_err()) } } diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 42495b6c..8faf4d71 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -4,6 +4,7 @@ use super::test_prelude::*; mod state_database_tests { use super::*; + /* #[test] // Test that the previously inserted operators are present after restart fn test_operator_store() { @@ -68,4 +69,5 @@ mod state_database_tests { .expect("Failed to create database"); assert_eq!(fixture.db.get_last_processed_block(), 10); } + */ } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index d17ccc5d..cdcb5437 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -43,8 +43,15 @@ impl TestFixture { db.insert_operator(op).expect("Failed to insert operator"); }); + // Build cluster, shares, and validator data let cluster = generators::cluster::with_operators(&operators); - db.insert_cluster(cluster.clone()) + let validator = generators::validator::random_metadata(cluster.cluster_id); + let shares = operators + .iter() + .map(|op| generators::share::random(cluster.cluster_id, op.id)) + .collect(); + + db.insert_validator(cluster.clone(), validator, shares) .expect("Failed to insert cluster"); Self { @@ -99,35 +106,35 @@ pub mod generators { use super::*; // Generate a fully cluster with a configurable number of operators + // Generate a random cluster pub fn random(num_operators: u64) -> Cluster { let cluster_id = ClusterId(rand::thread_rng().gen::().into()); - let members = (0..num_operators) - .map(|i| member::new(cluster_id, OperatorId(i))) - .collect(); + let members = (0..num_operators).map(OperatorId).collect(); + let owner_recipient = Address::random(); Cluster { cluster_id, - cluster_members: members, + owner: owner_recipient, + fee_recipient: owner_recipient, faulty: 0, liquidated: false, - validator_metadata: validator::random_metadata(), + cluster_members: members, } } // Generate a cluster with a specific set of operators pub fn 
with_operators(operators: &[Operator]) -> Cluster { let cluster_id = ClusterId(rand::thread_rng().gen::().into()); - let members = operators - .iter() - .map(|op| member::new(cluster_id, op.id)) - .collect(); + let members = operators.iter().map(|op| op.id).collect(); + let owner_recipient = Address::random(); Cluster { cluster_id, - cluster_members: members, + owner: owner_recipient, + fee_recipient: owner_recipient, faulty: 0, liquidated: false, - validator_metadata: validator::random_metadata(), + cluster_members: members, } } } @@ -140,7 +147,6 @@ pub mod generators { ClusterMember { operator_id, cluster_id, - share: share::random(), } } } @@ -149,8 +155,10 @@ pub mod generators { use super::*; // Generate a random keyshare - pub fn random() -> Share { + pub fn random(cluster_id: ClusterId, operator_id: OperatorId) -> Share { Share { + operator_id, + cluster_id, share_pubkey: pubkey::random(), encrypted_private_key: [0u8; 256], } @@ -181,14 +189,12 @@ pub mod generators { // Generate random ValidatorMetdata // assumes fee_recipient = owner. - pub fn random_metadata() -> ValidatorMetadata { - let recipient_owner = Address::random(); + pub fn random_metadata(cluster_id: ClusterId) -> ValidatorMetadata { ValidatorMetadata { - validator_index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), - validator_pubkey: pubkey::random(), - fee_recipient: recipient_owner, + public_key: pubkey::random(), + cluster_id, + index: ValidatorIndex(rand::thread_rng().gen_range(0..100)), graffiti: Graffiti::default(), - owner: recipient_owner, } } } @@ -212,6 +218,7 @@ pub mod queries { // Get a Cluster from the database pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { let conn = db.connection().unwrap(); + let cluster = conn .prepare("SELECT cluster_id, faulty, liquidated FROM clusters WHERE cluster_id = ?1") .unwrap() @@ -253,14 +260,22 @@ pub mod queries { db: &NetworkDatabase, cluster_id: ClusterId, operator_id: OperatorId, - ) -> Option<(i64, i64)> { - let conn = db.connection().unwrap(); - let member = conn.prepare("SELECT cluster_id, operator_id FROM cluster_members WHERE cluster_id = ?1 AND operator_id = ?2") - .unwrap() - .query_row(params![*cluster_id, *operator_id], |row| Ok((row.get(0)?, row.get(1)?))) + ) -> Option> { + let conn = db.connection().expect("Failed to get a DB connection"); + let mut stmt = conn + .prepare(SQL[&SqlStatement::GetClusterMembers]) + .expect("Failed to prepare statement"); + let members = stmt + .query_map([cluster_id.0], |row| { + Ok(ClusterMember { + operator_id: OperatorId(row.get(0)?), + cluster_id, + }) + }) .optional() .unwrap(); - member + + members.collect() } // Get ValidatorMetadata from the database @@ -283,249 +298,244 @@ pub mod assertions { use super::*; - // State Store (In-Memory) Assertions - // These assertions verify the in memory state maintained by the application - - // Verifies that the operator is in the state store - pub fn assert_operator_exists_in_store(db: &NetworkDatabase, operator: &Operator) { - // Check operator exists in memory state - let stored_operator = db.read_state(|state| { - state - .operators - .get(&operator.id) - .expect("Operator should exist in memory state") - .clone() - }); + // Assertions on operator information fetches from in memory and the database + pub mod operator { + use super::*; - // Verify all fields match - assert_eq!(stored_operator.id, operator.id, "Operator ID mismatch"); - assert_eq!( - stored_operator.rsa_pubkey.public_key_to_pem().unwrap(), - 
operator.rsa_pubkey.public_key_to_pem().unwrap(), - "Operator public key mismatch" - ); - assert_eq!( - stored_operator.owner, operator.owner, - "Operator owner mismatch" - ); - } + // Asserts data between the two operators is the same + fn data(op1: &Operator, op2: &Operator) { + // Verify all fields match + assert_eq!(op1.id, op2.id, "Operator ID mismatch"); + assert_eq!( + op1.rsa_pubkey.public_key_to_pem().unwrap(), + op2.rsa_pubkey.public_key_to_pem().unwrap(), + "Operator public key mismatch" + ); + assert_eq!(op1.owner, op2.owner, "Operator owner mismatch"); + } - // Verifies that the operator is not in the state store - pub fn assert_operator_not_exists_in_store(db: &NetworkDatabase, operator: OperatorId) { - assert!(!db.operator_exists(&operator)); + // Verifies that the operator is in memory + pub fn exists_in_memory(db: &NetworkDatabase, operator: &Operator) { + let stored_operator = db + .get_operator(&operator.id) + .expect("Operator should exist"); + data(operator, &stored_operator); + } + + // Verifies that the operator is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, operator: OperatorId) { + assert!(!db.operator_exists(&operator)); + } + + // Verify that the operator is in the database + pub fn exists_in_db(db: &NetworkDatabase, operator: &Operator) { + let db_operator = + queries::get_operator(db, operator.id).expect("Operator not found in database"); + data(operator, &db_operator); + } + + // Verify that the operator does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, operator_id: OperatorId) { + // Check database + assert!( + queries::get_operator(db, operator_id).is_none(), + "Operator still exists in database" + ); + } } - // Verifies that the cluster does not exist in the state store - pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) { - // Just make sure we have 0 references to the cluster_id - db.read_state(|state| { - let cluster_id = cluster.cluster_id; - assert!(!state.clusters.contains(&cluster_id)); - assert!(!state.shares.contains_key(&cluster_id)); - assert!(!state.validator_metadata.contains_key(&cluster_id)); - assert!(!state.cluster_members.contains_key(&cluster_id)); - assert!(!state.cluster_members.contains_key(&cluster_id)); - }); + // All validator related assertions + pub mod validator { + use super::*; } - // Verifies that the cluster exists correctly in the state store - pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) { - // - operators: HashMap, - // Verify all operators exist and are cluster members - db.read_state(|state| { - let operator_ids: Vec = cluster - .cluster_members - .iter() - .map(|c| c.operator_id) - .collect(); - - for id in operator_ids { - // Check operator exists - assert!( - db.operator_exists(&id), - "Operator {} not found in database", - *id - ); + /* - // - cluster_members: HashMap>, - // Check operator is recorded as cluster member - assert!( - state.cluster_members[&cluster.cluster_id].contains(&id), - "Operator {} not recorded as cluster member in memory state", - *id - ); - } - // - clusters: HashSet, - // Verify cluster is recorded in memory state - assert!( - state.clusters.contains(&cluster.cluster_id), - "Cluster ID not found in memory state" - ); + // Verifies that the cluster does not exist in the state store + pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) { + // Just make sure we have 0 references to the cluster_id + db.read_state(|state| { + let cluster_id = 
cluster.cluster_id; + assert!(!state.clusters.contains(&cluster_id)); + assert!(!state.shares.contains_key(&cluster_id)); + assert!(!state.validator_metadata.contains_key(&cluster_id)); + assert!(!state.cluster_members.contains_key(&cluster_id)); + assert!(!state.cluster_members.contains_key(&cluster_id)); + }); + } - // - shares: HashMap, - // Verify shares exists and share data matches if we're a member - if let Some(our_id) = state.id { - if let Some(our_member) = cluster + // Verifies that the cluster exists correctly in the state store + pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) { + // - operators: HashMap, + // Verify all operators exist and are cluster members + db.read_state(|state| { + let operator_ids: Vec = cluster .cluster_members .iter() - .find(|m| m.operator_id == our_id) - { - let stored_share = state.shares[&cluster.cluster_id].clone(); - assert_eq!( - stored_share.share_pubkey, our_member.share.share_pubkey, - "Share public key mismatch" + .map(|c| c.operator_id) + .collect(); + + for id in operator_ids { + // Check operator exists + assert!( + db.operator_exists(&id), + "Operator {} not found in database", + *id ); - assert_eq!( - stored_share.encrypted_private_key, our_member.share.encrypted_private_key, - "Encrypted private key mismatch" + + // - cluster_members: HashMap>, + // Check operator is recorded as cluster member + assert!( + state.cluster_members[&cluster.cluster_id].contains(&id), + "Operator {} not recorded as cluster member in memory state", + *id ); } - } - assert!( - state.shares.contains_key(&cluster.cluster_id), - "No share found for cluster" - ); - // - validator_metadata: HashMap, - // Verify validator metadata matches - let validator_metadata = db - .get_validator_metadata(&cluster.cluster_id) - .expect("Failed to get metadata") - .clone(); - assert_eq!( - validator_metadata.owner, cluster.validator_metadata.owner, - "Validator owner mismatch" - ); - assert_eq!( - validator_metadata.validator_index, cluster.validator_metadata.validator_index, - "Validator index mismatch" - ); + // - clusters: HashSet, + // Verify cluster is recorded in memory state + assert!( + state.clusters.contains(&cluster.cluster_id), + "Cluster ID not found in memory state" + ); + + // - shares: HashMap, + // Verify shares exists and share data matches if we're a member + if let Some(our_id) = state.id { + if let Some(our_member) = cluster + .cluster_members + .iter() + .find(|m| m.operator_id == our_id) + { + let stored_share = state.shares[&cluster.cluster_id].clone(); + assert_eq!( + stored_share.share_pubkey, our_member.share.share_pubkey, + "Share public key mismatch" + ); + assert_eq!( + stored_share.encrypted_private_key, our_member.share.encrypted_private_key, + "Encrypted private key mismatch" + ); + } + } + assert!( + state.shares.contains_key(&cluster.cluster_id), + "No share found for cluster" + ); + + // - validator_metadata: HashMap, + // Verify validator metadata matches + let validator_metadata = db + .get_validator_metadata(&cluster.cluster_id) + .expect("Failed to get metadata") + .clone(); + assert_eq!( + validator_metadata.owner, cluster.validator_metadata.owner, + "Validator owner mismatch" + ); + assert_eq!( + validator_metadata.validator_index, cluster.validator_metadata.validator_index, + "Validator index mismatch" + ); + assert_eq!( + validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, + "Fee recipient mismatch" + ); + assert_eq!( + validator_metadata.graffiti, 
cluster.validator_metadata.graffiti, + "Graffiti mismatch" + ); + }); + } + + // Database (Persistent Storage) Assertions + // These assertions verify the persistent state in the SQLite dataabase + + + // Verifies that a cluster exists in the database + pub fn assert_cluster_exists_in_db(db: &NetworkDatabase, cluster: &Cluster) { + // Check cluster base data + let (id, faulty, liquidated) = + queries::get_cluster(db, cluster.cluster_id).expect("Cluster not found in database"); + + assert_eq!(id as u64, *cluster.cluster_id, "Cluster ID mismatch"); assert_eq!( - validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, - "Fee recipient mismatch" + faulty as u64, cluster.faulty, + "Cluster faulty count mismatch" ); assert_eq!( - validator_metadata.graffiti, cluster.validator_metadata.graffiti, - "Graffiti mismatch" + liquidated, cluster.liquidated, + "Cluster liquidated status mismatch" ); - }); - } - // Database (Persistent Storage) Assertions - // These assertions verify the persistent state in the SQLite dataabase - - // Verify that the operator is in the database - pub fn assert_operator_exists_in_db(db: &NetworkDatabase, operator: &Operator) { - let db_operator = - queries::get_operator(db, operator.id).expect("Operator not found in database"); - - assert_eq!( - db_operator.rsa_pubkey.public_key_to_pem().unwrap(), - operator.rsa_pubkey.public_key_to_pem().unwrap(), - "Operator public key mismatch in database" - ); - assert_eq!( - db_operator.id, operator.id, - "Operator ID mismatch in database" - ); - assert_eq!( - db_operator.owner, operator.owner, - "Operator owner mismatch in database" - ); - } + // Verify cluster members + for member in &cluster.cluster_members { + let member_exists = + queries::get_cluster_member(db, member.cluster_id, member.operator_id) + .expect("Cluster member not found in database"); - // Verify that the operator does not exist in the database - pub fn assert_operator_not_exists_in_db(db: &NetworkDatabase, operator_id: OperatorId) { - // Check database - assert!( - queries::get_operator(db, operator_id).is_none(), - "Operator still exists in database" - ); - } + assert_eq!( + member_exists.0 as u64, *member.cluster_id, + "Cluster member cluster ID mismatch" + ); + assert_eq!( + member_exists.1 as u64, *member.operator_id, + "Cluster member operator ID mismatch" + ); + } + + // Verify shares + let shares = queries::get_shares(db, cluster.cluster_id); + assert!(!shares.is_empty(), "No shares found for cluster"); - // Verifies that a cluster exists in the database - pub fn assert_cluster_exists_in_db(db: &NetworkDatabase, cluster: &Cluster) { - // Check cluster base data - let (id, faulty, liquidated) = - queries::get_cluster(db, cluster.cluster_id).expect("Cluster not found in database"); - - assert_eq!(id as u64, *cluster.cluster_id, "Cluster ID mismatch"); - assert_eq!( - faulty as u64, cluster.faulty, - "Cluster faulty count mismatch" - ); - assert_eq!( - liquidated, cluster.liquidated, - "Cluster liquidated status mismatch" - ); - - // Verify cluster members - for member in &cluster.cluster_members { - let member_exists = - queries::get_cluster_member(db, member.cluster_id, member.operator_id) - .expect("Cluster member not found in database"); + // Verify validator metadata + let validator = + queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) + .expect("Validator not found in database"); assert_eq!( - member_exists.0 as u64, *member.cluster_id, - "Cluster member cluster ID mismatch" + validator.0, + 
cluster.validator_metadata.validator_pubkey.to_string(), + "Validator pubkey mismatch" + ); + assert_eq!( + validator.1 as u64, *cluster.cluster_id, + "Validator cluster ID mismatch" ); assert_eq!( - member_exists.1 as u64, *member.operator_id, - "Cluster member operator ID mismatch" + validator.2, + cluster.validator_metadata.owner.to_string(), + "Validator owner mismatch" ); } - // Verify shares - let shares = queries::get_shares(db, cluster.cluster_id); - assert!(!shares.is_empty(), "No shares found for cluster"); - - // Verify validator metadata - let validator = - queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) - .expect("Validator not found in database"); - - assert_eq!( - validator.0, - cluster.validator_metadata.validator_pubkey.to_string(), - "Validator pubkey mismatch" - ); - assert_eq!( - validator.1 as u64, *cluster.cluster_id, - "Validator cluster ID mismatch" - ); - assert_eq!( - validator.2, - cluster.validator_metadata.owner.to_string(), - "Validator owner mismatch" - ); - } + // Verifies that a cluster does not exist in the database + pub fn assert_cluster_exists_not_in_db(db: &NetworkDatabase, cluster: &Cluster) { + // Verify cluster base data is gone + assert!( + queries::get_cluster(db, cluster.cluster_id).is_none(), + "Cluster still exists in database" + ); - // Verifies that a cluster does not exist in the database - pub fn assert_cluster_exists_not_in_db(db: &NetworkDatabase, cluster: &Cluster) { - // Verify cluster base data is gone - assert!( - queries::get_cluster(db, cluster.cluster_id).is_none(), - "Cluster still exists in database" - ); + // Verify all cluster members are gone + for member in &cluster.cluster_members { + assert!( + queries::get_cluster_member(db, member.cluster_id, member.operator_id).is_none(), + "Cluster member still exists in database" + ); + } + + // Verify all shares are gone + let shares = queries::get_shares(db, cluster.cluster_id); + assert!(shares.is_empty(), "Shares still exist for cluster"); - // Verify all cluster members are gone - for member in &cluster.cluster_members { + // Verify validator metadata is gone assert!( - queries::get_cluster_member(db, member.cluster_id, member.operator_id).is_none(), - "Cluster member still exists in database" + queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) + .is_none(), + "Validator still exists in database" ); } - - // Verify all shares are gone - let shares = queries::get_shares(db, cluster.cluster_id); - assert!(shares.is_empty(), "Shares still exist for cluster"); - - // Verify validator metadata is gone - assert!( - queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) - .is_none(), - "Validator still exists in database" - ); - } + */ } diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index fd55ec46..5b8cc7de 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -9,32 +9,34 @@ mod validator_database_tests { fn test_update_fee_recipient() { let fixture = TestFixture::new(); let cluster = &fixture.cluster; - let new_address = Address::random(); + let new_fee_recipient = Address::random(); // Update fee recipient - fixture + assert!(fixture .db - .update_fee_recipient( - cluster.cluster_id, - cluster.validator_metadata.validator_pubkey.clone(), - new_address, - ) - .expect("Failed to update fee recipient"); + .update_fee_recipient(cluster.owner, new_fee_recipient) + .is_ok()); 
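// Once the commented-out database check below is re-enabled it will need the
// new schema, where fee_recipient lives on clusters and is keyed by owner; a
// hedged sketch (table and column names per table_schema.sql):
//
//     let conn = fixture.db.connection().unwrap();
//     let stored: String = conn
//         .query_row(
//             "SELECT fee_recipient FROM clusters WHERE owner = ?1",
//             params![cluster.owner.to_string()],
//             |row| row.get(0),
//         )
//         .unwrap();
//     assert_eq!(stored, new_fee_recipient.to_string());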
-        // Verify update in memory state
-        let metadata = &fixture
+        // Assert data has changed in memory and database
+        let memory_cluster = fixture
             .db
-            .get_validator_metadata(&cluster.cluster_id)
-            .expect("Failed to get cluster metadata");
+            .state
+            .multi_state
+            .clusters
+            .get_by(&cluster.cluster_id)
+            .expect("Cluster should exist");
         assert_eq!(
-            metadata.fee_recipient, new_address,
-            "Fee recipient not updated in memory"
+            memory_cluster.fee_recipient, new_fee_recipient,
+            "Fee recipient was not updated"
         );
+
+        /*
+
         // Verify update in database
-        let validator = queries::get_validator(
-            &fixture.db,
-            &cluster.validator_metadata.validator_pubkey.to_string(),
         )
         .expect("Validator not found in database");
         assert_eq!(
@@ -42,7 +44,9 @@
             new_address.to_string(),
             "Fee recipient not updated in database"
         );
+        */
     }
+    /*

     #[test]
     /// Test updating the graffiti of a validator
@@ -89,4 +93,5 @@
             "Should fail when updating non-existent cluster"
         );
     }
+    */
 }
diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs
index 5751c80a..ef870bd6 100644
--- a/anchor/database/src/validator_operations.rs
+++ b/anchor/database/src/validator_operations.rs
@@ -1,6 +1,5 @@
-use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL};
+use crate::{multi_index::UniqueIndex, DatabaseError, NetworkDatabase, SqlStatement, SQL};
 use rusqlite::params;
-use ssv_types::ClusterId;
 use types::{Address, Graffiti, PublicKey};

 /// Implements all validator related db functionality
@@ -8,69 +7,56 @@ impl NetworkDatabase {
     /// Update the fee recipient address for a validator
     pub fn update_fee_recipient(
         &self,
-        cluster_id: ClusterId,
-        validator_pubkey: PublicKey,
+        owner: Address,
         fee_recipient: Address,
     ) -> Result<(), DatabaseError> {
-        // Make sure we are part of the cluster for this Validator
-        let is_member = self.read_state(|state| state.clusters.contains(&cluster_id));
-        if !is_member {
-            return Err(DatabaseError::NotFound(format!(
-                "Validator for Cluster {} not in database",
-                *cluster_id
-            )));
-        }
-
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])?
             .execute(params![
-                fee_recipient.to_string(),
-                validator_pubkey.to_string()
+                fee_recipient.to_string(), // new fee recipient address for entire cluster
+                owner.to_string()          // owner of the cluster
             ])?;

-        self.modify_state(|state| {
-            let metadata = state
-                .validator_metadata
-                .get_mut(&cluster_id)
-                .expect("Cluster should exist");
-            metadata.fee_recipient = fee_recipient;
-        });
-
+        // if we are in the cluster, update the in-memory fee recipient for the cluster
+        if let Some(mut cluster) = self.state.multi_state.clusters.get_by(&owner) {
+            // update the recipient and write the cluster back
+            cluster.fee_recipient = fee_recipient;
+            self.state
+                .multi_state
+                .clusters
+                .update(&cluster.cluster_id, cluster.to_owned());
+        }
         Ok(())
     }

     /// Update the graffiti for a validator
     pub fn update_graffiti(
         &self,
-        cluster_id: ClusterId,
         validator_pubkey: PublicKey,
         graffiti: Graffiti,
     ) -> Result<(), DatabaseError> {
-        let is_member = self.read_state(|state| state.clusters.contains(&cluster_id));
-        if !is_member {
-            return Err(DatabaseError::NotFound(format!(
-                "Validator for Cluster {} not in database",
-                *cluster_id
-            )));
-        }
-
         // Update the database
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::SetGraffiti])?
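// update_fee_recipient above and update_graffiti here share one
// read-modify-write shape against the multi-index store; a generic sketch
// (`map`, `key`, `primary_key`, and `mutate` are illustrative):
//
//     if let Some(mut value) = map.get_by(&key) {
//         mutate(&mut value);               // edit the cloned-out value
//         map.update(&primary_key, value);  // write back via the primary key
//     }
//
// Since update() leaves index entries untouched, this is only sound for
// fields that are not themselves index keys (fee_recipient and graffiti are
// not; owner and the validator public key are).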
.execute(params![ - graffiti.0.as_slice(), // Convert [u8; 32] to &[u8] - validator_pubkey.to_string() + graffiti.0.as_slice(), // new graffiti + validator_pubkey.to_string() // the public key of the validator ])?; - // Update the in-memory state - self.modify_state(|state| { - let metadata = state + // If we operate on behalf of the validator, update the in memory state + if let Some(mut validator) = self + .state + .multi_state + .validator_metadata + .get_by(&validator_pubkey) + { + // update graffiti and insert back in to update + validator.graffiti = graffiti; + self.state + .multi_state .validator_metadata - .get_mut(&cluster_id) - .expect("Cluster should exist"); - metadata.graffiti = graffiti; - }); - + .update(&validator_pubkey, validator); + } Ok(()) } } From 90ae6c510de81056eecbfa7319a589c0e6fdc747 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 07:58:51 -0600 Subject: [PATCH 32/50] integrate all tests --- Cargo.lock | 100 ++--- .../common/ssv_types/src/sql_conversions.rs | 4 +- anchor/database/src/cluster_operations.rs | 30 +- anchor/database/src/tests/cluster_tests.rs | 93 ++--- anchor/database/src/tests/mod.rs | 3 +- anchor/database/src/tests/state_tests.rs | 17 +- anchor/database/src/tests/utils.rs | 383 ++++++++---------- anchor/database/src/tests/validator_tests.rs | 93 +---- anchor/database/src/validator_operations.rs | 6 +- 9 files changed, 292 insertions(+), 437 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f734a0f..8d1769af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -896,7 +896,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "arbitrary", @@ -1027,9 +1027,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.4" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf" +checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" dependencies = [ "jobserver", "libc", @@ -1165,7 +1165,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "clap", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1246,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "quote", "syn 1.0.109", @@ -1868,7 +1868,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2332,7 +2332,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2354,7 +2354,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2422,7 +2422,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "bytes", "discv5 0.9.0", @@ -2837,7 +2837,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2862,9 +2862,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -3183,7 +3183,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "async-channel", "asynchronous-codec", @@ -3980,7 +3980,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "bytes", ] @@ -4176,7 +4176,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "arbitrary", "c-kzg", @@ -4226,9 +4226,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = 
"b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libflate" @@ -4816,7 +4816,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4874,7 +4874,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "git-version", "target_info", @@ -4953,7 +4953,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -5001,7 +5001,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "fnv", ] @@ -5077,7 +5077,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5119,7 +5119,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "prometheus", ] @@ -5171,9 +5171,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -5484,9 +5484,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -5828,7 +5828,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 2.0.7", + "thiserror 2.0.8", "ucd-trie", ] @@ -5961,7 +5961,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6222,7 +6222,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.7", + "thiserror 2.0.8", "tokio", "tracing", ] @@ -6241,7 +6241,7 @@ dependencies = [ "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.7", + "thiserror 2.0.8", "tinyvec", "tracing", "web-time", @@ -6249,9 +6249,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52cd4b1eff68bf27940dd39811292c49e007f4d0b4c357358dc9b0197be6b527" +checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" dependencies = [ "cfg_aliases", "libc", @@ -6889,7 +6889,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" [[package]] name = "salsa20" @@ -7019,9 +7019,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -7063,7 +7063,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "serde", "url", @@ -7685,7 +7685,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7814,7 +7814,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "async-channel", "futures", @@ -7872,7 +7872,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "quote", "syn 1.0.109", @@ -7889,11 +7889,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" +checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" dependencies = [ - "thiserror-impl 2.0.7", + "thiserror-impl 2.0.8", ] [[package]] @@ -7909,9 +7909,9 @@ 
dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.7" +version = "2.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" +checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" dependencies = [ "proc-macro2", "quote", @@ -8008,9 +8008,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -8387,7 +8387,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8565,7 +8565,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#1de498340c5166e4abbc3de12dae4af6dab7c6c3" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs index bc7292e0..37a7422b 100644 --- a/anchor/common/ssv_types/src/sql_conversions.rs +++ b/anchor/common/ssv_types/src/sql_conversions.rs @@ -1,6 +1,6 @@ use crate::{Cluster, ClusterId, ClusterMember}; -use crate::{OperatorId, Operator}; -use crate::{Share, ValidatorMetadata, ValidatorIndex}; +use crate::{Operator, OperatorId}; +use crate::{Share, ValidatorIndex, ValidatorMetadata}; use base64::prelude::*; use openssl::rsa::Rsa; use rusqlite::{types::Type, Error as SqlError, Row}; diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 4334b4ee..3622d79b 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -1,4 +1,4 @@ -use super::{DatabaseError, NetworkDatabase, SqlStatement, SQL}; +use super::{DatabaseError, NetworkDatabase, NonUniqueIndex, SqlStatement, UniqueIndex, SQL}; use rusqlite::params; use ssv_types::{Cluster, ClusterId, Share, ValidatorMetadata}; use std::sync::atomic::Ordering; @@ -92,6 +92,13 @@ impl NetworkDatabase { status, // status of the cluster (liquidated or active) *cluster_id // Id of the cluster ])?; + + // get and update the cluster if we are a part of it + if let Some(mut cluster) = self.state.multi_state.clusters.get_by(&cluster_id) { + cluster.liquidated = status; + self.state.multi_state.clusters.update(&cluster_id, cluster); + } + Ok(()) } @@ -103,7 +110,26 @@ impl NetworkDatabase { conn.prepare_cached(SQL[&SqlStatement::DeleteValidator])? 
.execute(params![validator_pubkey.to_string()])?; - // todo!() remove all the relevant information from in memory stores + // remove the validator's share and its metadata + self.state.multi_state.shares.remove(&validator_pubkey); + let metadata = self + .state + .multi_state + .validator_metadata + .remove(&validator_pubkey) + .expect("Data should have existed"); + + // if this cluster no longer contains any validators, remove it from the cluster map + if self + .state + .multi_state + .validator_metadata + .get_all_by(&metadata.cluster_id) + .is_none() + { + self.state.multi_state.clusters.remove(&metadata.cluster_id); + } + Ok(()) } } diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index b783140f..68073a3f 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -3,91 +3,63 @@ use super::test_prelude::*; #[cfg(test)] mod cluster_database_tests { use super::*; - /* #[test] // Test inserting a cluster into the database fn test_insert_retrieve_cluster() { let fixture = TestFixture::new(); - assertions::assert_cluster_exists_in_db(&fixture.db, &fixture.cluster); - assertions::assert_cluster_exists_in_store(&fixture.db, &fixture.cluster); + assertions::cluster::exists_in_db(&fixture.db, &fixture.cluster); + assertions::cluster::exists_in_memory(&fixture.db, &fixture.cluster); } #[test] - // Try inserting a cluster that does not already have registers operators in the database - fn test_insert_cluster_without_operators() { - let fixture = TestFixture::new_empty(); - let cluster = generators::cluster::random(3); - fixture + // Test updating the fee recipient + fn test_update_fee_recipient() { + let fixture = TestFixture::new(); + let mut cluster = fixture.cluster; + let new_fee_recipient = Address::random(); + + // Update fee recipient + assert!(fixture .db - .insert_cluster(cluster) - .expect_err("Insertion should fail"); + .update_fee_recipient(cluster.owner, new_fee_recipient) + .is_ok()); + + // assertions will compare the data + cluster.fee_recipient = new_fee_recipient; + assertions::cluster::exists_in_db(&fixture.db, &cluster); + assertions::cluster::exists_in_memory(&fixture.db, &cluster); } #[test] - // Test deleting a cluster and make sure that it is properly cleaned up - fn test_delete_cluster() { - let fixture = TestFixture::new(); + // Try inserting a cluster that does not already have registered operators in the database + fn test_insert_cluster_without_operators() { + let fixture = TestFixture::new_empty(); + let cluster = generators::cluster::random(4); + let metadata = generators::validator::random_metadata(cluster.cluster_id); + let shares = vec![generators::share::random(cluster.cluster_id, OperatorId(1))]; fixture .db - .delete_cluster(fixture.cluster.cluster_id) - .expect("Failed to delete cluster"); - assertions::assert_cluster_exists_not_in_db(&fixture.db, &fixture.cluster); - assertions::assert_cluster_exists_not_in_store(&fixture.db, &fixture.cluster); + .insert_validator(cluster, metadata, shares) + .expect_err("Insertion should fail"); } #[test] // Test updating the operational status of the cluster fn test_update_cluster_status() { let fixture = TestFixture::new(); - let cluster_id = fixture.cluster.cluster_id; + let mut cluster = fixture.cluster; // Test updating to liquidated fixture .db - .update_status(cluster_id, true) + .update_status(cluster.cluster_id, true) .expect("Failed to update cluster status"); - // Verify both in memory and database - let (_, _, 
liquidated) = - queries::get_cluster(&fixture.db, cluster_id).expect("Cluster not found"); - assert!(liquidated, "Cluster should be liquidated"); - } - - #[test] - // Test inserting two clusters that an operator is a member of - fn test_insert_two_clusters() { - let fixture = TestFixture::new_empty(); - let us_pubkey = fixture.pubkey; - let us_operator = generators::operator::with_pubkey(us_pubkey); - - //generate a few more operators then add us into the group - let mut operators: Vec = (0..3).map(generators::operator::with_id).collect(); - operators.push(us_operator); - - // inset all of the operators - for op in &operators { - fixture - .db - .insert_operator(op) - .expect("Failed to insert operator"); - } - - // generate and insert 2 clusters - let cluster1 = generators::cluster::with_operators(&operators); - let cluster2 = generators::cluster::with_operators(&operators); - for c in [cluster1.clone(), cluster2.clone()] { - fixture - .db - .insert_cluster(c) - .expect("Failed to insert cluster"); - } - - // make sure they are in the db and state store is expected - assertions::assert_cluster_exists_in_db(&fixture.db, &cluster1); - assertions::assert_cluster_exists_in_db(&fixture.db, &cluster2); - assertions::assert_cluster_exists_in_store(&fixture.db, &cluster1); - assertions::assert_cluster_exists_in_store(&fixture.db, &cluster2); + // verify in memory and db + cluster.liquidated = true; + assertions::cluster::exists_in_db(&fixture.db, &cluster); + assertions::cluster::exists_in_memory(&fixture.db, &cluster); } #[test] @@ -96,8 +68,7 @@ mod cluster_database_tests { let fixture = TestFixture::new(); fixture .db - .insert_cluster(fixture.cluster) + .insert_validator(fixture.cluster, fixture.validator, fixture.shares) .expect_err("Expected failure when inserting cluster that already exists"); } - */ } diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index aa127188..5f9af5aa 100644 --- a/anchor/database/src/tests/mod.rs +++ b/anchor/database/src/tests/mod.rs @@ -6,12 +6,11 @@ mod validator_tests; pub mod test_prelude { pub use super::utils::*; + pub use crate::multi_index::{NonUniqueIndex, UniqueIndex}; pub use crate::NetworkDatabase; pub use ssv_types::*; pub use tempfile::tempdir; pub use types::{Address, Graffiti, PublicKey}; - pub use crate::multi_index::{UniqueIndex, NonUniqueIndex}; - pub use crate::sql_operations::{SqlStatement, SQL}; } #[cfg(test)] diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 8faf4d71..50926278 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -4,7 +4,6 @@ use super::test_prelude::*; mod state_database_tests { use super::*; - /* #[test] // Test that the previously inserted operators are present after restart fn test_operator_store() { @@ -18,8 +17,8 @@ mod state_database_tests { // confirm that all of the operators exist for operator in &fixture.operators { - assertions::assert_operator_exists_in_db(&fixture.db, operator); - assertions::assert_operator_exists_in_store(&fixture.db, operator); + assertions::operator::exists_in_db(&fixture.db, operator); + assertions::operator::exists_in_memory(&fixture.db, operator); } } @@ -36,23 +35,20 @@ mod state_database_tests { .expect("Failed to create database"); // Confirm all cluster related data is still correct - assertions::assert_cluster_exists_in_db(&fixture.db, &cluster); - assertions::assert_cluster_exists_in_store(&fixture.db, &cluster); + 
assertions::cluster::exists_in_db(&fixture.db, &cluster); + assertions::cluster::exists_in_memory(&fixture.db, &cluster); } #[test] // Test that you can update and retrieve a block number fn test_block_number() { let fixture = TestFixture::new(); - assert_eq!(fixture.db.read_state(|state| state.last_processed_block), 0); + assert_eq!(fixture.db.get_last_processed_block(), 0); fixture .db .processed_block(10) .expect("Failed to update the block number"); - assert_eq!( - fixture.db.read_state(|state| state.last_processed_block), - 10 - ); + assert_eq!(fixture.db.get_last_processed_block(), 10); } #[test] @@ -69,5 +65,4 @@ mod state_database_tests { .expect("Failed to create database"); assert_eq!(fixture.db.get_last_processed_block(), 10); } - */ } diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index cdcb5437..ec8708c6 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -1,4 +1,5 @@ use super::test_prelude::*; +use crate::{SqlStatement, SQL}; use openssl::pkey::Public; use openssl::rsa::Rsa; use rand::Rng; @@ -16,6 +17,8 @@ const DEFAULT_SEED: [u8; 16] = [42; 16]; pub struct TestFixture { pub db: NetworkDatabase, pub cluster: Cluster, + pub validator: ValidatorMetadata, + pub shares: Vec, pub operators: Vec, pub path: PathBuf, pub pubkey: Rsa, @@ -46,18 +49,21 @@ impl TestFixture { // Build cluster, shares, and validator data let cluster = generators::cluster::with_operators(&operators); let validator = generators::validator::random_metadata(cluster.cluster_id); - let shares = operators + let shares: Vec = operators .iter() .map(|op| generators::share::random(cluster.cluster_id, op.id)) .collect(); - db.insert_validator(cluster.clone(), validator, shares) + db.insert_validator(cluster.clone(), validator.clone(), shares.clone()) .expect("Failed to insert cluster"); + println!("all done"); Self { db, cluster, operators, + validator, + shares, path: db_path, pubkey: us, _temp_dir: temp_dir, @@ -76,6 +82,8 @@ impl TestFixture { db, cluster: generators::cluster::random(0), operators: Vec::new(), + validator: generators::validator::random_metadata(ClusterId(1)), + shares: Vec::new(), path: db_path, pubkey, _temp_dir: temp_dir, @@ -105,8 +113,7 @@ pub mod generators { pub mod cluster { use super::*; - // Generate a fully cluster with a configurable number of operators - // Generate a random cluster + // Generate a random cluster with a specific number of operators pub fn random(num_operators: u64) -> Cluster { let cluster_id = ClusterId(rand::thread_rng().gen::().into()); let members = (0..num_operators).map(OperatorId).collect(); @@ -141,8 +148,7 @@ pub mod generators { pub mod member { use super::*; - - // Generate a new cluster member for a cluster and operator + // Generate a new Cluster Member pub fn new(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember { ClusterMember { operator_id, @@ -153,7 +159,6 @@ pub mod generators { pub mod share { use super::*; - // Generate a random keyshare pub fn random(cluster_id: ClusterId, operator_id: OperatorId) -> Share { Share { @@ -205,90 +210,103 @@ pub mod generators { pub mod queries { use super::*; + // Single selection query statements + const GET_OPERATOR: &str = + "SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1"; + const GET_CLUSTER: &str = "SELECT cluster_id, owner, fee_recipient, faulty, liquidated FROM clusters WHERE cluster_id = ?1"; + const GET_SHARES: &str = "SELECT validator_pubkey, cluster_id, operator_id, 
share_pubkey FROM shares WHERE cluster_id = ?1"; + const GET_VALIDATOR: &str = "SELECT validator_pubkey, cluster_id, validator_index, graffiti FROM validators WHERE validator_pubkey = ?1"; + const GET_MEMBERS: &str = "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1"; + // Get an operator from the database pub fn get_operator(db: &NetworkDatabase, id: OperatorId) -> Option { let conn = db.connection().unwrap(); - let operators = conn.prepare("SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1") - .unwrap() - .query_row(params![*id], |row| Ok(row.try_into().unwrap())) + let mut stmt = conn + .prepare(GET_OPERATOR) + .expect("Failed to prepare statement"); + let operators = stmt + .query_row(params![*id], |row| { + let operator = Operator::try_from(row).expect("Failed to create operator"); + Ok(operator) + }) .ok(); operators } // Get a Cluster from the database - pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option<(i64, i64, bool)> { + pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option { + let members = get_cluster_members(db, id).expect("Cluster members should exist"); let conn = db.connection().unwrap(); - - let cluster = conn - .prepare("SELECT cluster_id, faulty, liquidated FROM clusters WHERE cluster_id = ?1") - .unwrap() + let mut stmt = conn + .prepare(GET_CLUSTER) + .expect("Failed to prepare statement"); + let cluster = stmt .query_row(params![*id], |row| { - Ok((row.get(0)?, row.get(1)?, row.get(2)?)) + let cluster = Cluster::try_from((row, members))?; + Ok(cluster) }) - .optional() - .unwrap(); + .ok(); cluster } // Get a share from the database - pub fn get_shares( - db: &NetworkDatabase, - cluster_id: ClusterId, - ) -> Vec<(String, i64, i64, Option)> { + pub fn get_shares(db: &NetworkDatabase, cluster_id: ClusterId) -> Option> { let conn = db.connection().unwrap(); - let mut stmt = conn - .prepare("SELECT validator_pubkey, cluster_id, operator_id, share_pubkey FROM shares WHERE cluster_id = ?1") - .unwrap(); - let shares = stmt + .prepare(GET_SHARES) + .expect("Failed to prepare statement"); + let shares: Result, _> = stmt .query_map(params![*cluster_id], |row| { - Ok(( - row.get(0).unwrap(), - row.get(1).unwrap(), - row.get(2).unwrap(), - row.get(3).unwrap(), - )) + let share = Share::try_from(row)?; + Ok(share) }) - .unwrap() - .map(|r| r.unwrap()) + .ok()? .collect(); - shares + match shares { + Ok(vec) if !vec.is_empty() => Some(vec), + _ => None, + } } // Get a ClusterMember from the database - pub fn get_cluster_member( + fn get_cluster_members( db: &NetworkDatabase, cluster_id: ClusterId, - operator_id: OperatorId, ) -> Option> { - let conn = db.connection().expect("Failed to get a DB connection"); + let conn = db.connection().unwrap(); let mut stmt = conn .prepare(SQL[&SqlStatement::GetClusterMembers]) .expect("Failed to prepare statement"); - let members = stmt + let members: Result, _> = stmt .query_map([cluster_id.0], |row| { Ok(ClusterMember { operator_id: OperatorId(row.get(0)?), cluster_id, }) }) - .optional() - .unwrap(); - - members.collect() + .ok()? 
+ .collect(); + match members { + Ok(vec) if !vec.is_empty() => Some(vec), + _ => None, + } } // Get ValidatorMetadata from the database pub fn get_validator( db: &NetworkDatabase, validator_pubkey: &str, - ) -> Option<(String, i64, String, String, i64)> { + ) -> Option { let conn = db.connection().unwrap(); - let validator = conn.prepare("SELECT validator_pubkey, cluster_id, owner, fee_recipient, validator_index FROM validators WHERE validator_pubkey = ?1") - .unwrap() - .query_row(params![validator_pubkey], |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?))) - .optional() - .unwrap(); + let mut stmt = conn + .prepare(GET_VALIDATOR) + .expect("Failed to prepare statement"); + let validator = stmt + .query_row(params![validator_pubkey], |row| { + let validator = ValidatorMetadata::try_from(row)?; + Ok(validator) + }) + .ok(); validator } } @@ -347,195 +365,112 @@ pub mod assertions { // All validator related assertions pub mod validator { use super::*; - } - - /* - - // Verifies that the cluster does not exist in the state store - pub fn assert_cluster_exists_not_in_store(db: &NetworkDatabase, cluster: &Cluster) { - // Just make sure we have 0 references to the cluster_id - db.read_state(|state| { - let cluster_id = cluster.cluster_id; - assert!(!state.clusters.contains(&cluster_id)); - assert!(!state.shares.contains_key(&cluster_id)); - assert!(!state.validator_metadata.contains_key(&cluster_id)); - assert!(!state.cluster_members.contains_key(&cluster_id)); - assert!(!state.cluster_members.contains_key(&cluster_id)); - }); + fn data(v1: &ValidatorMetadata, v2: &ValidatorMetadata) { + assert_eq!(v1.cluster_id, v2.cluster_id); + assert_eq!(v1.graffiti, v2.graffiti); + assert_eq!(v1.index, v2.index); + assert_eq!(v1.public_key, v2.public_key); } - - // Verifies that the cluster exists correctly in the state store - pub fn assert_cluster_exists_in_store(db: &NetworkDatabase, cluster: &Cluster) { - // - operators: HashMap, - // Verify all operators exist and are cluster members - db.read_state(|state| { - let operator_ids: Vec = cluster - .cluster_members - .iter() - .map(|c| c.operator_id) - .collect(); - - for id in operator_ids { - // Check operator exists - assert!( - db.operator_exists(&id), - "Operator {} not found in database", - *id - ); - - // - cluster_members: HashMap>, - // Check operator is recorded as cluster member - assert!( - state.cluster_members[&cluster.cluster_id].contains(&id), - "Operator {} not recorded as cluster member in memory state", - *id - ); - } - - // - clusters: HashSet, - // Verify cluster is recorded in memory state - assert!( - state.clusters.contains(&cluster.cluster_id), - "Cluster ID not found in memory state" - ); - - // - shares: HashMap, - // Verify shares exists and share data matches if we're a member - if let Some(our_id) = state.id { - if let Some(our_member) = cluster - .cluster_members - .iter() - .find(|m| m.operator_id == our_id) - { - let stored_share = state.shares[&cluster.cluster_id].clone(); - assert_eq!( - stored_share.share_pubkey, our_member.share.share_pubkey, - "Share public key mismatch" - ); - assert_eq!( - stored_share.encrypted_private_key, our_member.share.encrypted_private_key, - "Encrypted private key mismatch" - ); - } - } - assert!( - state.shares.contains_key(&cluster.cluster_id), - "No share found for cluster" - ); - - // - validator_metadata: HashMap, - // Verify validator metadata matches - let validator_metadata = db - .get_validator_metadata(&cluster.cluster_id) - .expect("Failed to get 
metadata") - .clone(); - assert_eq!( - validator_metadata.owner, cluster.validator_metadata.owner, - "Validator owner mismatch" - ); - assert_eq!( - validator_metadata.validator_index, cluster.validator_metadata.validator_index, - "Validator index mismatch" - ); - assert_eq!( - validator_metadata.fee_recipient, cluster.validator_metadata.fee_recipient, - "Fee recipient mismatch" - ); - assert_eq!( - validator_metadata.graffiti, cluster.validator_metadata.graffiti, - "Graffiti mismatch" - ); - }); + // Verifies that the cluster is in memory + pub fn exists_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { + let stored_validator = db + .state + .multi_state + .validator_metadata + .get_by(&v.public_key) + .expect("Metadata should exist"); + data(v, &stored_validator); } - // Database (Persistent Storage) Assertions - // These assertions verify the persistent state in the SQLite dataabase - - - // Verifies that a cluster exists in the database - pub fn assert_cluster_exists_in_db(db: &NetworkDatabase, cluster: &Cluster) { - // Check cluster base data - let (id, faulty, liquidated) = - queries::get_cluster(db, cluster.cluster_id).expect("Cluster not found in database"); + // Verifies that the cluster is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { + let stored_validator = db + .state + .multi_state + .validator_metadata + .get_by(&v.public_key); + assert!(stored_validator.is_none()); + } - assert_eq!(id as u64, *cluster.cluster_id, "Cluster ID mismatch"); - assert_eq!( - faulty as u64, cluster.faulty, - "Cluster faulty count mismatch" - ); - assert_eq!( - liquidated, cluster.liquidated, - "Cluster liquidated status mismatch" - ); + // Verify that the cluster is in the database + pub fn exists_in_db(db: &NetworkDatabase, v: &ValidatorMetadata) { + let db_validator = queries::get_validator(db, &v.public_key.to_string()) + .expect("Validator should exist"); + data(v, &db_validator); + } - // Verify cluster members - for member in &cluster.cluster_members { - let member_exists = - queries::get_cluster_member(db, member.cluster_id, member.operator_id) - .expect("Cluster member not found in database"); - - assert_eq!( - member_exists.0 as u64, *member.cluster_id, - "Cluster member cluster ID mismatch" - ); - assert_eq!( - member_exists.1 as u64, *member.operator_id, - "Cluster member operator ID mismatch" - ); - } + // Verify that the cluster does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, v: &ValidatorMetadata) { + let db_validator = queries::get_validator(db, &v.public_key.to_string()); + assert!(db_validator.is_none()); + } + } - // Verify shares - let shares = queries::get_shares(db, cluster.cluster_id); - assert!(!shares.is_empty(), "No shares found for cluster"); + // Cluster assetions + pub mod cluster { + use super::*; + fn data(c1: &Cluster, c2: &Cluster) { + assert_eq!(c1.cluster_id, c2.cluster_id); + assert_eq!(c1.owner, c2.owner); + assert_eq!(c1.fee_recipient, c2.fee_recipient); + assert_eq!(c1.faulty, c2.faulty); + assert_eq!(c1.liquidated, c2.liquidated); + assert_eq!(c1.cluster_members, c2.cluster_members); + } + // Verifies that the cluster is in memory + pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) { + let stored_cluster = db + .state + .multi_state + .clusters + .get_by(&c.cluster_id) + .expect("Cluster should exist"); + data(c, &stored_cluster) + } - // Verify validator metadata - let validator = - queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) - 
.expect("Validator not found in database"); + // Verifies that the cluster is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) { + let stored_cluster = db.state.multi_state.clusters.get_by(&cluster_id); + assert!(stored_cluster.is_none()); + } - assert_eq!( - validator.0, - cluster.validator_metadata.validator_pubkey.to_string(), - "Validator pubkey mismatch" - ); - assert_eq!( - validator.1 as u64, *cluster.cluster_id, - "Validator cluster ID mismatch" - ); - assert_eq!( - validator.2, - cluster.validator_metadata.owner.to_string(), - "Validator owner mismatch" - ); + // Verify that the cluster is in the database + pub fn exists_in_db(db: &NetworkDatabase, c: &Cluster) { + let db_cluster = + queries::get_cluster(db, c.cluster_id).expect("Cluster not found in database"); + data(c, &db_cluster); } - // Verifies that a cluster does not exist in the database - pub fn assert_cluster_exists_not_in_db(db: &NetworkDatabase, cluster: &Cluster) { - // Verify cluster base data is gone + // Verify that the cluster does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, cluster_id: ClusterId) { + // Check database assert!( - queries::get_cluster(db, cluster.cluster_id).is_none(), - "Cluster still exists in database" + queries::get_cluster(db, cluster_id).is_none(), + "Cluster exists in database" ); + } + } - // Verify all cluster members are gone - for member in &cluster.cluster_members { - assert!( - queries::get_cluster_member(db, member.cluster_id, member.operator_id).is_none(), - "Cluster member still exists in database" - ); - } + // + pub mod share { + use super::*; + fn data(s1: &Share, s2: &Share) { + assert_eq!(s1.cluster_id, s2.cluster_id); + assert_eq!(s1.encrypted_private_key, s2.encrypted_private_key); + assert_eq!(s1.operator_id, s2.operator_id); + assert_eq!(s1.share_pubkey, s2.share_pubkey); + } + // Verifies that the share is in memory + pub fn exists_in_memory(db: &NetworkDatabase, s: &Share) {} - // Verify all shares are gone - let shares = queries::get_shares(db, cluster.cluster_id); - assert!(shares.is_empty(), "Shares still exist for cluster"); + // Verifies that the share is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, s: &Share) {} - // Verify validator metadata is gone - assert!( - queries::get_validator(db, &cluster.validator_metadata.validator_pubkey.to_string()) - .is_none(), - "Validator still exists in database" - ); - } - */ + // Verify that the share is in the database + pub fn exists_in_db(db: &NetworkDatabase, s: &Share) {} + + // Verify that the share does not exist in the database + pub fn exists_not_in_db(db: &NetworkDatabase, s: &Share) {} + } } diff --git a/anchor/database/src/tests/validator_tests.rs b/anchor/database/src/tests/validator_tests.rs index 5b8cc7de..7cda1b9a 100644 --- a/anchor/database/src/tests/validator_tests.rs +++ b/anchor/database/src/tests/validator_tests.rs @@ -4,94 +4,23 @@ use super::test_prelude::*; mod validator_database_tests { use super::*; - #[test] - // Test updating the fee recipient - fn test_update_fee_recipient() { - let fixture = TestFixture::new(); - let cluster = &fixture.cluster; - let new_fee_recipient = Address::random(); - - // Update fee recipient - assert!(fixture - .db - .update_fee_recipient(cluster.owner, new_fee_recipient) - .is_ok()); - - // Assert data has changed in memory and database - let memory_cluster = fixture - .db - .state - .multi_state - .clusters - .get_by(&cluster.cluster_id) - .expect("Cluster shoulde exist"); - 
assert_eq!( - memory_cluster.fee_recipient, new_fee_recipient, - "Fee recipient was not updated" - ); - - - - // - - /* - - // Verify update in database - ) - .expect("Validator not found in database"); - assert_eq!( - validator.3, - new_address.to_string(), - "Fee recipient not updated in database" - ); - */ - } - /* - #[test] /// Test updating the graffiti of a validator fn test_update_graffiti() { let fixture = TestFixture::new(); - let cluster = &fixture.cluster; - let new_graffiti = Graffiti::default(); // Or create a specific test graffiti + let new_graffiti = Graffiti::default(); + let mut validator = fixture.validator; - // Update graffiti - fixture - .db - .update_graffiti( - cluster.cluster_id, - cluster.validator_metadata.validator_pubkey.clone(), - new_graffiti, - ) - .expect("Failed to update graffiti"); - - // Verify update in memory state - let metadata = &fixture + // update the graffiti + assert!(fixture .db - .get_validator_metadata(&cluster.cluster_id) - .expect("Failed to get cluster metadata"); - assert_eq!( - metadata.graffiti, new_graffiti, - "Graffiti not updated in memory" - ); - } - - #[test] - /// Test updating the fee recipient of a validator that does not exist - fn test_update_validator_nonexistent_cluster() { - let fixture = TestFixture::new(); - let nonexistent_cluster_id = ClusterId(*fixture.cluster.cluster_id + 1); - - let result = fixture.db.update_fee_recipient( - nonexistent_cluster_id, - fixture.cluster.validator_metadata.validator_pubkey.clone(), - Address::random(), - ); + .update_graffiti(&validator.public_key, new_graffiti) + .is_ok()); - assert!( - result.is_err(), - "Should fail when updating non-existent cluster" - ); + // confirm that it has changed both in the db and memory + // exists call will also check data values + validator.graffiti = new_graffiti; + assertions::validator::exists_in_db(&fixture.db, &validator); + assertions::validator::exists_in_memory(&fixture.db, &validator); } - */ } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index ef870bd6..1a9db1a3 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -13,8 +13,8 @@ impl NetworkDatabase { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])? 
.execute(params![ - fee_recipient.to_string(), // new fee recipient address for entire cluster - owner.to_string() // owner of the cluster + fee_recipient.to_string(), // new fee recipient address for entire cluster + owner.to_string() // owner of the cluster ])?; // if we are in the cluster, update the in memory fee recipient for the cluster @@ -32,7 +32,7 @@ impl NetworkDatabase { /// Update the graffiti for a validator pub fn update_graffiti( &self, - validator_pubkey: PublicKey, + validator_pubkey: &PublicKey, graffiti: Graffiti, ) -> Result<(), DatabaseError> { // Update the database From c5d1ff1297667e000c5e238476432645a7c5a300 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 10:54:56 -0600 Subject: [PATCH 33/50] fix up testing --- anchor/database/src/cluster_operations.rs | 1 + anchor/database/src/multi_index.rs | 214 ++++++++++++-------- anchor/database/src/state.rs | 15 +- anchor/database/src/tests/cluster_tests.rs | 20 ++ anchor/database/src/tests/state_tests.rs | 4 +- anchor/database/src/tests/utils.rs | 69 +++++-- anchor/database/src/validator_operations.rs | 8 +- 7 files changed, 214 insertions(+), 117 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 3622d79b..1002fb1a 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -128,6 +128,7 @@ impl NetworkDatabase { .is_none() { self.state.multi_state.clusters.remove(&metadata.cluster_id); + self.state.single_state.clusters.remove(&metadata.cluster_id); } Ok(()) diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs index ae9e5a11..106bcbe7 100644 --- a/anchor/database/src/multi_index.rs +++ b/anchor/database/src/multi_index.rs @@ -279,97 +279,139 @@ where } #[cfg(test)] -mod tests { +mod multi_index_tests { use super::*; - use crate::tests::test_prelude::generators; - use ssv_types::{Cluster, ClusterId, OperatorId, Share}; - use types::{Address, PublicKey}; + + #[derive(Clone, Debug, PartialEq)] + struct TestValue { + id: i32, + data: String, + } + + #[test] + fn test_basic_operations() { + let map: MultiIndexMap = MultiIndexMap::new(); + + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + + // Test insertion + map.insert(&1, &"key1".to_string(), &true, value.clone()); + + // Test primary key access + assert_eq!(map.get_by(&1), Some(value.clone())); + + // Test secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(value.clone())); + + // Test tertiary key access + assert_eq!(map.get_by(&true), Some(value.clone())); + + // Test update + let new_value = TestValue { + id: 1, + data: "updated".to_string(), + }; + map.update(&1, new_value.clone()); + assert_eq!(map.get_by(&1), Some(new_value.clone())); + + // Test removal + assert_eq!(map.remove(&1), Some(new_value.clone())); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key1".to_string()), None); + assert_eq!(map.get_by(&true), None); + } + + #[test] + fn test_non_unique_indices() { + let map: MultiIndexMap = MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert multiple values with same secondary and tertiary keys + map.insert(&1, &"shared_key".to_string(), &true, value1.clone()); + map.insert(&2, &"shared_key".to_string(), &true, value2.clone()); + + // Test primary key access (still unique) + assert_eq!(map.get_by(&1), Some(value1.clone())); 
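// The tag type parameters are what gate these accessors at compile time: an
// index tagged UniqueTag exposes `get_by`, while a NonUniqueTag index only
// exposes `get_all_by`. A minimal sketch with non-unique secondary and
// tertiary indices (tag names as used elsewhere in this module; illustrative
// only):
//
//     let map: MultiIndexMap<i32, String, bool, TestValue, NonUniqueTag, NonUniqueTag> =
//         MultiIndexMap::new();
//     map.insert(&1, &"k".to_string(), &true, value1.clone());
//     map.insert(&2, &"k".to_string(), &true, value2.clone());
//     let hits = map.get_all_by(&"k".to_string()).unwrap();
//     assert_eq!(hits.len(), 2); // both entries share the secondary key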
+ assert_eq!(map.get_by(&2), Some(value2.clone())); + + // Test secondary key access (non-unique) + let secondary_values = map.get_all_by(&"shared_key".to_string()).unwrap(); + assert_eq!(secondary_values.len(), 2); + assert!(secondary_values.contains(&value1)); + assert!(secondary_values.contains(&value2)); + + // Test tertiary key access (non-unique) + let tertiary_values = map.get_all_by(&true).unwrap(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&value1)); + assert!(tertiary_values.contains(&value2)); + + // Test removal maintains other entries + map.remove(&1); + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&2), Some(value2.clone())); + + let remaining_secondary = map.get_all_by(&"shared_key".to_string()).unwrap(); + assert_eq!(remaining_secondary.len(), 1); + assert_eq!(remaining_secondary[0], value2); + } #[test] - fn test_nonunique() { - let cluster_id = ClusterId(10); - let operator_id = OperatorId(10); - let owner = Address::random(); - - // Shares with different public keys, but same cluster id and owner - let share_1 = generators::share::random(cluster_id, operator_id); - let pk_1 = generators::pubkey::random(); - let share_2 = generators::share::random(cluster_id, operator_id); - let pk_2 = generators::pubkey::random(); - - // A MultiIndexMap for accessing Shares - // Primary Key: validator public key which uniquly identifies a share - // Secondary Key: cluster id which does not uniquely identify a share (NonUniqueTag) - // Tertiary Key: owner address which does not uniquely identify a share (NonUniqueTag) - let map: MultiIndexMap = - MultiIndexMap::new(); - - // insert the data - map.insert(&pk_1, &cluster_id, &owner, share_1); - map.insert(&pk_2, &cluster_id, &owner, share_2); - - // This does not compile - // let shares = map.get_all_by(&pk_1); - - // This does compile - let share_1 = map.get_by(&pk_1); - assert!(share_1.is_some()); - - // This does not compile since we enforce NonUnique via NonUniqueTag - // let share = map.get_by(&cluster_id); - - // This does compile - let shares = map.get_all_by(&cluster_id).expect("Failed to get shares"); - assert!(shares.len() == 2); - - // Like above, this does not compile - // let share = map.get_by(&owner); - - // This does compile - let shares = map.get_all_by(&owner).expect("Failed to get shares"); - assert!(shares.len() == 2); + fn test_mixed_uniqueness() { + let map: MultiIndexMap = MultiIndexMap::new(); + + let value1 = TestValue { + id: 1, + data: "test1".to_string(), + }; + let value2 = TestValue { + id: 2, + data: "test2".to_string(), + }; + + // Insert values with unique secondary key but shared tertiary key + map.insert(&1, &"key1".to_string(), &true, value1.clone()); + map.insert(&2, &"key2".to_string(), &true, value2.clone()); + + // Test unique secondary key access + assert_eq!(map.get_by(&"key1".to_string()), Some(value1.clone())); + assert_eq!(map.get_by(&"key2".to_string()), Some(value2.clone())); + + // Test non-unique tertiary key access + let tertiary_values = map.get_all_by(&true).unwrap(); + assert_eq!(tertiary_values.len(), 2); + assert!(tertiary_values.contains(&value1)); + assert!(tertiary_values.contains(&value2)); } #[test] - fn test_unique() { - // generate a cluster and its corresponding validator - let cluster = generators::cluster::random(4); - let validator_metadata = generators::validator::random_metadata(cluster.cluster_id); - - // A MultiIndexMap for accessing a cluster - // Primary Key: cluster id that uniquely identifies the cluster - // Secondary Key: 
validator public key that uniquely identifies this cluster - // Tertiary Key: owner address that uniquely identifies this cluster - let map: MultiIndexMap = - MultiIndexMap::new(); - - // insert the cluster - map.insert( - &cluster.cluster_id, - &validator_metadata.public_key, - &cluster.owner, - cluster.clone(), - ); - - // - Fetch via cluster id - // This does not compile - //let cluster = map.get_all_by(&cluster.cluster_id); - // This does compile - let c = map.get_by(&cluster.cluster_id); - assert!(c.is_some()); - - // - Fetch via public key - // This does not compile - //let cluster = map.get_all_by(&validator_metadata.public_key); - // This does compile due to UniqueTag - let c = map.get_by(&validator_metadata.public_key); - assert!(c.is_some()); - - // - Fetch via owner - // This does not compile - //let cluster = map.get_all_by(&cluster.owner); - // This does compile due to UniqueTag - let c = map.get_by(&cluster.owner); - assert!(c.is_some()); + fn test_empty_cases() { + let map: MultiIndexMap = MultiIndexMap::new(); + + // Test access on empty map + assert_eq!(map.get_by(&1), None); + assert_eq!(map.get_by(&"key".to_string()), None); + assert_eq!(map.get_by(&true), None); + + // Test remove on empty map + assert_eq!(map.remove(&1), None); + + // Test update on empty map + let value = TestValue { + id: 1, + data: "test".to_string(), + }; + assert_eq!(map.update(&1, value), None); } } diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index d258eb9e..259f8101 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -25,9 +25,9 @@ impl NetworkState { // Get the last processed block from the database let last_processed_block = Self::get_last_processed_block(&conn)?; - // Without an Id, we have no idea who we are. Check to see if an operator with our PublicKey - // is stored the database, else we have to wait for it to be processed by the execution - // layer + // Without an ID, we have no idea who we are. Check to see if an operator with our public key + // is stored the database. If it does not exist, that means the operator still has to be registered + // with the network contract or that we have not seen the corresponding event yet let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { operator_id } else { @@ -43,9 +43,10 @@ impl NetworkState { }; // First Phase: Fetch data from the database - // The two main data structures are a map of ClusterId -> Cluster and ClusterID -> - // Vec<(Share, ValidatorMetadata)>. This greatly simplifies data handling and makes it very - // easy to add more customized stores in the future. 
Also, just fetch the operators + // Two main data structures for state reconstruction + // 1) ClusterId -> Cluster + // 2) ClusterId -> Vec<(Share, ValidatorMetadata)> + // This simplifies data reconstruction and makes it easy to add more customized stores in the future let operators = Self::fetch_operators(&conn)?; let share_validator = Self::fetch_shares_and_validators(&conn, id)?; let clusters = Self::fetch_clusters(&conn, id)?; @@ -191,7 +192,7 @@ impl NetworkState { } } -// Clean interface for accessing Single state data +// Interface for accessing single state data impl NetworkDatabase { /// Get operator data from in-memory store pub fn get_operator(&self, id: &OperatorId) -> Option { diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 68073a3f..35db030f 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -10,6 +10,26 @@ mod cluster_database_tests { let fixture = TestFixture::new(); assertions::cluster::exists_in_db(&fixture.db, &fixture.cluster); assertions::cluster::exists_in_memory(&fixture.db, &fixture.cluster); + assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); + assertions::validator::exists_in_db(&fixture.db, &fixture.validator); + assertions::share::exists_in_db(&fixture.db, &fixture.validator.public_key, &fixture.shares); + } + + #[test] + // Test deleting the last validator from a cluster and make sure the metadata, + // cluster, cluster members, and shares are all cleaned up + fn test_delete_last_validator() { + let fixture = TestFixture::new(); + let pubkey = fixture.validator.public_key.clone(); + assert!(fixture.db.delete_validator(&pubkey).is_ok()); + + // Since there was only one validator in the cluster, everything should be removed + assertions::cluster::exists_not_in_db(&fixture.db, fixture.cluster.cluster_id); + assertions::cluster::exists_not_in_memory(&fixture.db, fixture.cluster.cluster_id); + assertions::validator::exists_not_in_db(&fixture.db, &fixture.validator); + assertions::validator::exists_not_in_memory(&fixture.db, &fixture.validator); + assertions::share::exists_not_in_db(&fixture.db, &pubkey); + assertions::share::exists_not_in_memory(&fixture.db,&pubkey); } #[test] diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index 50926278..efc14c0f 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -34,9 +34,9 @@ mod state_database_tests { fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) .expect("Failed to create database"); - // Confirm all cluster related data is still correct - assertions::cluster::exists_in_db(&fixture.db, &cluster); + // confirm all data is what we expect assertions::cluster::exists_in_memory(&fixture.db, &cluster); + assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); } #[test] diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index ec8708c6..e63ac638 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -1,9 +1,8 @@ use super::test_prelude::*; -use crate::{SqlStatement, SQL}; use openssl::pkey::Public; use openssl::rsa::Rsa; use rand::Rng; -use rusqlite::{params, OptionalExtension}; +use rusqlite::params; use std::path::PathBuf; use tempfile::TempDir; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; @@ -56,7 +55,6 @@ impl TestFixture { db.insert_validator(cluster.clone(), 
validator.clone(), shares.clone()) .expect("Failed to insert cluster"); - println!("all done"); Self { db, cluster, operators, @@ -209,12 +207,13 @@ pub mod generators { // This will extract information corresponding to the original tables pub mod queries { use super::*; + use std::str::FromStr; // Single selection query statements const GET_OPERATOR: &str = "SELECT operator_id, public_key, owner_address FROM operators WHERE operator_id = ?1"; const GET_CLUSTER: &str = "SELECT cluster_id, owner, fee_recipient, faulty, liquidated FROM clusters WHERE cluster_id = ?1"; - const GET_SHARES: &str = "SELECT validator_pubkey, cluster_id, operator_id, share_pubkey FROM shares WHERE cluster_id = ?1"; + const GET_SHARES: &str = "SELECT share_pubkey, encrypted_key, cluster_id, operator_id FROM shares WHERE validator_pubkey = ?1"; const GET_VALIDATOR: &str = "SELECT validator_pubkey, cluster_id, validator_index, graffiti FROM validators WHERE validator_pubkey = ?1"; const GET_MEMBERS: &str = "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1"; @@ -235,7 +234,7 @@ // Get a Cluster from the database pub fn get_cluster(db: &NetworkDatabase, id: ClusterId) -> Option<Cluster> { - let members = get_cluster_members(db, id).expect("Cluster members should exist"); + let members = get_cluster_members(db, id)?; let conn = db.connection().unwrap(); let mut stmt = conn .prepare(GET_CLUSTER) .expect("Failed to prepare statement"); @@ -250,15 +249,28 @@ // Get a share from the database - pub fn get_shares(db: &NetworkDatabase, cluster_id: ClusterId) -> Option<Vec<Share>> { + pub fn get_shares(db: &NetworkDatabase, pubkey: &PublicKey) -> Option<Vec<Share>> { let conn = db.connection().unwrap(); let mut stmt = conn .prepare(GET_SHARES) .expect("Failed to prepare statement"); let shares: Result<Vec<Share>, _> = stmt - .query_map(params![*cluster_id], |row| { - let share = Share::try_from(row)?; - Ok(share) + .query_map(params![pubkey.to_string()], |row| { + + let share_pubkey_str = row.get::<_, String>(0)?; + let share_pubkey = PublicKey::from_str(&share_pubkey_str).unwrap(); + let encrypted_private_key: [u8; 256] = row.get(1)?; + + // Columns 2 and 3 hold the ClusterId and OperatorId respectively + let cluster_id = ClusterId(row.get(2)?); + let operator_id = OperatorId(row.get(3)?); + + Ok(Share { + operator_id, + cluster_id, + share_pubkey, + encrypted_private_key, + }) }) .ok()? 
.collect(); @@ -275,7 +287,7 @@ pub mod queries { ) -> Option> { let conn = db.connection().unwrap(); let mut stmt = conn - .prepare(SQL[&SqlStatement::GetClusterMembers]) + .prepare(GET_MEMBERS) .expect("Failed to prepare statement"); let members: Result, _> = stmt .query_map([cluster_id.0], |row| { @@ -420,6 +432,7 @@ pub mod assertions { } // Verifies that the cluster is in memory pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) { + assert!(db.member_of_cluster(&c.cluster_id) == true); let stored_cluster = db .state .multi_state @@ -431,6 +444,7 @@ pub mod assertions { // Verifies that the cluster is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) { + assert!(db.member_of_cluster(&cluster_id) == false); let stored_cluster = db.state.multi_state.clusters.get_by(&cluster_id); assert!(stored_cluster.is_none()); } @@ -461,16 +475,35 @@ pub mod assertions { assert_eq!(s1.operator_id, s2.operator_id); assert_eq!(s1.share_pubkey, s2.share_pubkey); } - // Verifies that the share is in memory - pub fn exists_in_memory(db: &NetworkDatabase, s: &Share) {} - // Verifies that the share is not in memory - pub fn exists_not_in_memory(db: &NetworkDatabase, s: &Share) {} + // Verifies that a share that belongs to this operator is in memory + pub fn exists_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey, share: &Share) { + let stored_share = db.state.multi_state.shares.get_by(validator_pubkey).expect("Share should exist"); + data(share, &stored_share); + } + + // Verifies that a share is not in memory + pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { + let db_share = db.state.multi_state.shares.get_by(validator_pubkey); + assert!(db_share.is_none()); + } + + // Verifies that all of the shares for a validator are in the database + pub fn exists_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &Vec) { + let db_shares = queries::get_shares(db, validator_pubkey).expect("Shares should exist in db"); + // have to pair them up since we dont know what order they will be returned from db in + db_shares.iter().flat_map(|share| { + s.iter().filter(|share2| share.operator_id == share2.operator_id) + .map(move |share2| (share, share2)) + }) + .for_each(|(share, share2)| data(share, share2)); + } - // Verify that the share is in the database - pub fn exists_in_db(db: &NetworkDatabase, s: &Share) {} + // Verifies that all of the shares for a validator are not in the database + pub fn exists_not_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey) { + let shares = queries::get_shares(db, validator_pubkey); + assert!(shares.is_none()); - // Verify that the share does not exist in the database - pub fn exists_not_in_db(db: &NetworkDatabase, s: &Share) {} + } } } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 1a9db1a3..6841f7d6 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -2,9 +2,9 @@ use crate::{multi_index::UniqueIndex, DatabaseError, NetworkDatabase, SqlStateme use rusqlite::params; use types::{Address, Graffiti, PublicKey}; -/// Implements all validator related db functionality +/// Implements all validator related database functionality impl NetworkDatabase { - /// Update the fee recipient address for a validator + /// Update the fee recipient address for all validators in a cluster pub fn update_fee_recipient( &self, owner: Address, @@ -17,7 +17,7 @@ impl NetworkDatabase { 
owner.to_string() // owner of the cluster ])?; - // if we are in the cluster, update the in memory fee recipient for the cluster + // If we are in the cluster, update the in memory fee recipient for the cluster if let Some(mut cluster) = self.state.multi_state.clusters.get_by(&owner) { // update recipient and insert back in to update cluster.fee_recipient = fee_recipient; @@ -43,7 +43,7 @@ validator_pubkey.to_string() // the public key of the validator ])?; - // If we operate on behalf of the validator, update the in memory state + // If we are an operator for the validator, update the in memory graffiti if let Some(mut validator) = self .state .multi_state From 35a79ec218d347eaaabf1ccc25eaf2bb1bf2eec7 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 11:20:38 -0600 Subject: [PATCH 34/50] clusterId to bytes 32 --- anchor/common/ssv_types/src/cluster.rs | 2 +- anchor/database/Cargo.toml | 2 +- anchor/database/src/cluster_operations.rs | 5 ++- anchor/database/src/multi_index.rs | 32 ++++++++++--------- anchor/database/src/state.rs | 3 +- anchor/database/src/table_schema.sql | 8 ++--- anchor/database/src/tests/cluster_tests.rs | 12 +++++--- anchor/database/src/tests/utils.rs | 36 ++++++---------------- 8 files changed, 47 insertions(+), 53 deletions(-) diff --git a/anchor/common/ssv_types/src/cluster.rs b/anchor/common/ssv_types/src/cluster.rs index 293da6e8..ed21c884 100644 --- a/anchor/common/ssv_types/src/cluster.rs +++ b/anchor/common/ssv_types/src/cluster.rs @@ -5,7 +5,7 @@ use types::{Address, Graffiti, PublicKey}; /// Unique identifier for a cluster #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Hash, From, Deref)] -pub struct ClusterId(pub u64); +pub struct ClusterId(pub [u8; 32]); /// A Cluster is a group of Operators that are acting on behalf of one or more Validators /// diff --git a/anchor/database/Cargo.toml b/anchor/database/Cargo.toml index 21681ada..2f1564d2 100644 --- a/anchor/database/Cargo.toml +++ b/anchor/database/Cargo.toml @@ -6,6 +6,7 @@ authors = ["Sigma Prime "] [dependencies] base64 = { workspace = true } +dashmap = { workspace = true } openssl = { workspace = true } parking_lot = { workspace = true } r2d2 = "0.8.10" @@ -13,7 +14,6 @@ r2d2_sqlite = "0.21.0" rusqlite = { workspace = true } ssv_types = { workspace = true } types = { workspace = true } -dashmap = { workspace = true } [dev-dependencies] rand = "0.8.5" diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 1002fb1a..c0b2044e 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -128,7 +128,10 @@ impl NetworkDatabase { .is_none() { self.state.multi_state.clusters.remove(&metadata.cluster_id); - self.state.single_state.clusters.remove(&metadata.cluster_id); + self.state + .single_state + .clusters + .remove(&metadata.cluster_id); } Ok(()) diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs index 106bcbe7..a7dc846f 100644 --- a/anchor/database/src/multi_index.rs +++ b/anchor/database/src/multi_index.rs @@ -290,8 +290,9 @@ mod multi_index_tests { #[test] fn test_basic_operations() { - let map: MultiIndexMap = MultiIndexMap::new(); - + let map: MultiIndexMap = + MultiIndexMap::new(); + let value = TestValue { id: 1, data: "test".to_string(), }; // Test insertion map.insert(&1, &"key1".to_string(), &true, value.clone()); - + // Test primary key access assert_eq!(map.get_by(&1), 
Some(value.clone())); - + // Test secondary key access assert_eq!(map.get_by(&"key1".to_string()), Some(value.clone())); - + // Test tertiary key access assert_eq!(map.get_by(&true), Some(value.clone())); @@ -326,8 +327,9 @@ mod multi_index_tests { #[test] fn test_non_unique_indices() { - let map: MultiIndexMap = MultiIndexMap::new(); - + let map: MultiIndexMap = + MultiIndexMap::new(); + let value1 = TestValue { id: 1, data: "test1".to_string(), @@ -361,7 +363,7 @@ mod multi_index_tests { map.remove(&1); assert_eq!(map.get_by(&1), None); assert_eq!(map.get_by(&2), Some(value2.clone())); - + let remaining_secondary = map.get_all_by(&"shared_key".to_string()).unwrap(); assert_eq!(remaining_secondary.len(), 1); assert_eq!(remaining_secondary[0], value2); @@ -369,8 +371,9 @@ mod multi_index_tests { #[test] fn test_mixed_uniqueness() { - let map: MultiIndexMap = MultiIndexMap::new(); - + let map: MultiIndexMap = + MultiIndexMap::new(); + let value1 = TestValue { id: 1, data: "test1".to_string(), @@ -397,16 +400,17 @@ mod multi_index_tests { #[test] fn test_empty_cases() { - let map: MultiIndexMap = MultiIndexMap::new(); - + let map: MultiIndexMap = + MultiIndexMap::new(); + // Test access on empty map assert_eq!(map.get_by(&1), None); assert_eq!(map.get_by(&"key".to_string()), None); assert_eq!(map.get_by(&true), None); - + // Test remove on empty map assert_eq!(map.remove(&1), None); - + // Test update on empty map let value = TestValue { id: 1, diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 259f8101..bfe45756 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -26,7 +26,7 @@ impl NetworkState { let last_processed_block = Self::get_last_processed_block(&conn)?; // Without an ID, we have no idea who we are. Check to see if an operator with our public key - // is stored the database. If it does not exist, that means the operator still has to be registered + // is stored the database. 
If it does not exist, that means the operator still has to be registered // with the network contract or that we have not seen the corresponding event yet let id = if let Ok(Some(operator_id)) = Self::does_self_exist(&conn, pubkey) { operator_id @@ -163,6 +163,7 @@ impl NetworkState { let clusters = stmt .query_map([*operator_id], |row| { let cluster_id = ClusterId(row.get(0)?); + println!("got here"); // Get all of the members for this cluster let cluster_members = Self::fetch_cluster_members(conn, cluster_id)?; diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql index 1c37d9b7..5ad23650 100644 --- a/anchor/database/src/table_schema.sql +++ b/anchor/database/src/table_schema.sql @@ -11,7 +11,7 @@ CREATE TABLE operators ( ); CREATE TABLE clusters ( - cluster_id INTEGER PRIMARY KEY, + cluster_id BLOB PRIMARY KEY, owner TEXT NOT NULL, fee_recipient TEXT NOT NULL, faulty INTEGER DEFAULT 0, @@ -19,7 +19,7 @@ CREATE TABLE clusters ( ); CREATE TABLE cluster_members ( - cluster_id INTEGER NOT NULL, + cluster_id BLOB NOT NULL, operator_id INTEGER NOT NULL, PRIMARY KEY (cluster_id, operator_id), FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) ON DELETE CASCADE, @@ -28,7 +28,7 @@ CREATE TABLE cluster_members ( CREATE TABLE validators ( validator_pubkey TEXT PRIMARY KEY, - cluster_id INTEGER NOT NULL, + cluster_id BLOB NOT NULL, validator_index INTEGER DEFAULT 0, graffiti BLOB DEFAULT X'0000000000000000000000000000000000000000000000000000000000000000', FOREIGN KEY (cluster_id) REFERENCES clusters(cluster_id) @@ -36,7 +36,7 @@ CREATE TABLE validators ( CREATE TABLE shares ( validator_pubkey TEXT NOT NULL, - cluster_id INTEGER NOT NULL, + cluster_id BLOB NOT NULL, operator_id INTEGER NOT NULL, share_pubkey TEXT, encrypted_key BLOB, diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 35db030f..62550f7f 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -12,24 +12,28 @@ mod cluster_database_tests { assertions::cluster::exists_in_memory(&fixture.db, &fixture.cluster); assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); assertions::validator::exists_in_db(&fixture.db, &fixture.validator); - assertions::share::exists_in_db(&fixture.db, &fixture.validator.public_key, &fixture.shares); + assertions::share::exists_in_db( + &fixture.db, + &fixture.validator.public_key, + &fixture.shares, + ); } #[test] - // Test deleting the last validator from a cluster and make sure the metadata, + // Test deleting the last validator from a cluster and make sure the metadata, // cluster, cluster members, and shares are all cleaned up fn test_delete_last_validator() { let fixture = TestFixture::new(); let pubkey = fixture.validator.public_key.clone(); assert!(fixture.db.delete_validator(&pubkey).is_ok()); - + // Since there was only one validator in the cluster, everything should be removed assertions::cluster::exists_not_in_db(&fixture.db, fixture.cluster.cluster_id); assertions::cluster::exists_not_in_memory(&fixture.db, fixture.cluster.cluster_id); assertions::validator::exists_not_in_db(&fixture.db, &fixture.validator); assertions::validator::exists_not_in_memory(&fixture.db, &fixture.validator); assertions::share::exists_not_in_db(&fixture.db, &pubkey); - assertions::share::exists_not_in_memory(&fixture.db,&pubkey); + assertions::share::exists_not_in_memory(&fixture.db, &pubkey); } #[test] diff --git a/anchor/database/src/tests/utils.rs 
b/anchor/database/src/tests/utils.rs index e63ac638..a70dd860 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -75,12 +75,13 @@ impl TestFixture { let pubkey = generators::pubkey::random_rsa(); let db = NetworkDatabase::new(&db_path, &pubkey).expect("Failed to create test database"); + let cluster = generators::cluster::random(0); Self { db, - cluster: generators::cluster::random(0), + validator: generators::validator::random_metadata(cluster.cluster_id), + cluster, operators: Vec::new(), - validator: generators::validator::random_metadata(ClusterId(1)), shares: Vec::new(), path: db_path, pubkey, @@ -97,11 +98,6 @@ pub mod generators { pub mod operator { use super::*; - pub fn with_pubkey(pubkey: Rsa) -> Operator { - let id = OperatorId(rand::thread_rng().gen::().into()); - Operator::new_with_pubkey(pubkey, id, Address::random()) - } - pub fn with_id(id: u64) -> Operator { let public_key = generators::pubkey::random_rsa(); Operator::new_with_pubkey(public_key, OperatorId(id), Address::random()) @@ -113,7 +109,8 @@ pub mod generators { // Generate a random cluster with a specific number of operators pub fn random(num_operators: u64) -> Cluster { - let cluster_id = ClusterId(rand::thread_rng().gen::().into()); + let cluster_id: [u8; 32] = rand::thread_rng().gen(); + let cluster_id = ClusterId(cluster_id); let members = (0..num_operators).map(OperatorId).collect(); let owner_recipient = Address::random(); @@ -129,7 +126,8 @@ pub mod generators { // Generate a cluster with a specific set of operators pub fn with_operators(operators: &[Operator]) -> Cluster { - let cluster_id = ClusterId(rand::thread_rng().gen::().into()); + let cluster_id: [u8; 32] = rand::thread_rng().gen(); + let cluster_id = ClusterId(cluster_id); let members = operators.iter().map(|op| op.id).collect(); let owner_recipient = Address::random(); @@ -144,17 +142,6 @@ pub mod generators { } } - pub mod member { - use super::*; - // Generate a new Cluster Member - pub fn new(cluster_id: ClusterId, operator_id: OperatorId) -> ClusterMember { - ClusterMember { - operator_id, - cluster_id, - } - } - } - pub mod share { use super::*; // Generate a random keyshare @@ -262,8 +249,8 @@ pub mod queries { let encrypted_private_key: [u8; 256] = row.get(1)?; // Get the ClusterId from column 2 and the OperatorId from column 3 - let operator_id = OperatorId(row.get(2)?); - let cluster_id = ClusterId(row.get(3)?); + let cluster_id = ClusterId(row.get(2)?); + let operator_id = OperatorId(row.get(3)?); Ok(Share { operator_id, cluster_id, @@ -476,11 +463,6 @@ pub mod assertions { assert_eq!(s1.share_pubkey, s2.share_pubkey); } - // Verifies that a share that belongs to this operator is in memory - pub fn exists_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey, share: &Share) { - let stored_share = db.state.multi_state.shares.get_by(validator_pubkey).expect("Share should exist"); - data(share, &stored_share); - } - // Verifies that a share is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { From aabbcdf7e0ac4249ff25ff78592d9bf3cc05b0d5 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 11:28:55 -0600 Subject: [PATCH 35/50] lints --- anchor/database/src/tests/mod.rs | 2 +- anchor/database/src/tests/utils.rs | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/anchor/database/src/tests/mod.rs b/anchor/database/src/tests/mod.rs index 5f9af5aa..f8f29a98 100644 --- a/anchor/database/src/tests/mod.rs +++
b/anchor/database/src/tests/mod.rs @@ -6,7 +6,7 @@ mod validator_tests; pub mod test_prelude { pub use super::utils::*; - pub use crate::multi_index::{NonUniqueIndex, UniqueIndex}; + pub use crate::multi_index::UniqueIndex; pub use crate::NetworkDatabase; pub use ssv_types::*; pub use tempfile::tempdir; diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index a70dd860..537f21d2 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -243,7 +243,6 @@ pub mod queries { .expect("Failed to prepare statement"); let shares: Result<Vec<Share>, _> = stmt .query_map(params![pubkey.to_string()], |row| { - let share_pubkey_str = row.get::<_, String>(0)?; let share_pubkey = PublicKey::from_str(&share_pubkey_str).unwrap(); let encrypted_private_key: [u8; 256] = row.get(1)?; @@ -463,7 +462,6 @@ pub mod assertions { assert_eq!(s1.share_pubkey, s2.share_pubkey); } - // Verifies that a share is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { let db_share = db.state.multi_state.shares.get_by(validator_pubkey); @@ -472,20 +470,23 @@ pub mod assertions { // Verifies that all of the shares for a validator are in the database pub fn exists_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &Vec<Share>) { - let db_shares = queries::get_shares(db, validator_pubkey).expect("Shares should exist in db"); + let db_shares = + queries::get_shares(db, validator_pubkey).expect("Shares should exist in db"); // have to pair them up since we don't know what order they will be returned from the db in - db_shares.iter().flat_map(|share| { - s.iter().filter(|share2| share.operator_id == share2.operator_id) - .map(move |share2| (share, share2)) - }) - .for_each(|(share, share2)| data(share, share2)); + db_shares + .iter() + .flat_map(|share| { + s.iter() + .filter(|share2| share.operator_id == share2.operator_id) + .map(move |share2| (share, share2)) + }) + .for_each(|(share, share2)| data(share, share2)); } // Verifies that all of the shares for a validator are not in the database pub fn exists_not_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey) { let shares = queries::get_shares(db, validator_pubkey); assert!(shares.is_none()); - } } } From a9ac7a5df17c12348d07b9e42f33e0df29d8f18e Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 11:43:59 -0600 Subject: [PATCH 36/50] clippy --- anchor/database/src/cluster_operations.rs | 4 +- anchor/database/src/tests/utils.rs | 45 ++++++++++----------- anchor/database/src/validator_operations.rs | 4 +- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index c0b2044e..af110832 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -111,12 +111,12 @@ impl NetworkDatabase { .execute(params![validator_pubkey.to_string()])?; // remove the validators share and its metadata - self.state.multi_state.shares.remove(&validator_pubkey); + self.state.multi_state.shares.remove(validator_pubkey); let metadata = self .state .multi_state .validator_metadata - .remove(&validator_pubkey) + .remove(validator_pubkey) .expect("Data should have existed"); // if this cluster no longer contains any validators, remove it from the cluster map diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 537f21d2..93bbc6a2 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs
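// A minimal, standalone sketch of the unordered-pairing check used by
// `exists_in_db` above: SQLite may return share rows in any order, so each
// returned row is matched to its expected counterpart by `operator_id` before
// the fields are compared. `MiniShare` and the `main` driver here are
// illustrative stand-ins only, not part of this patch series.
#[derive(Debug, PartialEq)]
struct MiniShare {
    operator_id: u64,
    share_pubkey: String,
}

fn main() {
    // Rows as they might come back from the database (arbitrary order).
    let from_db = vec![
        MiniShare { operator_id: 2, share_pubkey: "b".into() },
        MiniShare { operator_id: 1, share_pubkey: "a".into() },
    ];
    // The shares the test expects to find.
    let expected = vec![
        MiniShare { operator_id: 1, share_pubkey: "a".into() },
        MiniShare { operator_id: 2, share_pubkey: "b".into() },
    ];
    // Pair rows by operator_id, then compare each pair field by field.
    from_db
        .iter()
        .flat_map(|db_share| {
            expected
                .iter()
                .filter(|exp| exp.operator_id == db_share.operator_id)
                .map(move |exp| (db_share, exp))
        })
        .for_each(|(db_share, exp)| assert_eq!(db_share, exp));
}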
@@ -210,13 +210,12 @@ pub mod queries { let mut stmt = conn .prepare(GET_OPERATOR) .expect("Failed to prepare statement"); - let operators = stmt - .query_row(params![*id], |row| { - let operator = Operator::try_from(row).expect("Failed to create operator"); - Ok(operator) - }) - .ok(); - operators + + stmt.query_row(params![*id], |row| { + let operator = Operator::try_from(row).expect("Failed to create operator"); + Ok(operator) + }) + .ok() } // Get a Cluster from the database @@ -226,13 +225,12 @@ pub mod queries { let mut stmt = conn .prepare(GET_CLUSTER) .expect("Failed to prepare statement"); - let cluster = stmt - .query_row(params![*id], |row| { - let cluster = Cluster::try_from((row, members))?; - Ok(cluster) - }) - .ok(); - cluster + + stmt.query_row(params![*id], |row| { + let cluster = Cluster::try_from((row, members))?; + Ok(cluster) + }) + .ok() } // Get a share from the database @@ -299,13 +297,12 @@ pub mod queries { let mut stmt = conn .prepare(GET_VALIDATOR) .expect("Failed to prepare statement"); - let validator = stmt - .query_row(params![validator_pubkey], |row| { - let validator = ValidatorMetadata::try_from(row)?; - Ok(validator) - }) - .ok(); - validator + + stmt.query_row(params![validator_pubkey], |row| { + let validator = ValidatorMetadata::try_from(row)?; + Ok(validator) + }) + .ok() } } @@ -418,7 +415,7 @@ pub mod assertions { } // Verifies that the cluster is in memory pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) { - assert!(db.member_of_cluster(&c.cluster_id) == true); + assert!(db.member_of_cluster(&c.cluster_id)); let stored_cluster = db .state .multi_state @@ -430,7 +427,7 @@ pub mod assertions { // Verifies that the cluster is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) { - assert!(db.member_of_cluster(&cluster_id) == false); + assert!(!db.member_of_cluster(&cluster_id)); let stored_cluster = db.state.multi_state.clusters.get_by(&cluster_id); assert!(stored_cluster.is_none()); } @@ -469,7 +466,7 @@ pub mod assertions { } // Verifies that all of the shares for a validator are in the database - pub fn exists_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &Vec<Share>) { + pub fn exists_in_db(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &[Share]) { let db_shares = queries::get_shares(db, validator_pubkey).expect("Shares should exist in db"); // have to pair them up since we don't know what order they will be returned from the db in diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 6841f7d6..3bb24755 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -48,14 +48,14 @@ impl NetworkDatabase { .state .multi_state .validator_metadata - .get_by(&validator_pubkey) + .get_by(validator_pubkey) { // update graffiti and insert back in to update validator.graffiti = graffiti; self.state .multi_state .validator_metadata - .update(&validator_pubkey, validator); + .update(validator_pubkey, validator); } Ok(()) } From 3ff12ed8f3ef4cfe20e5595f753581c2b5a64102 Mon Sep 17 00:00:00 2001 From: Zachary Holme Date: Sat, 21 Dec 2024 12:38:57 -0600 Subject: [PATCH 37/50] make multistate pub --- anchor/database/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 44300376..dc6ad24b 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -87,7 +87,7 @@ pub struct NetworkDatabase { /// The public key
of our operator pubkey: Rsa, /// Custom state stores for easy data access - state: NetworkState, + pub state: NetworkState, /// Connection to the database conn_pool: Pool, } From 56358382543ba9d94ccce153928bf6f4e6893e1f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 23 Dec 2024 14:15:35 +0000 Subject: [PATCH 38/50] re-export and save all metadata and clusters --- anchor/database/src/cluster_operations.rs | 37 +++++++++++------------ anchor/database/src/lib.rs | 2 +- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index af110832..01809457 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -52,35 +52,34 @@ impl NetworkDatabase { // If we are a member in this cluster, store relevant information if let Some(share) = our_share { - let cluster_id = cluster.cluster_id; - // Record that we are a member of this cluster - self.state.single_state.clusters.insert(cluster_id); + self.state.single_state.clusters.insert(cluster.cluster_id); // Save the keyshare self.state.multi_state.shares.insert( &validator.public_key, // The validator this keyshare belongs to - &cluster_id, // The id of the cluster + &cluster.cluster_id, // The id of the cluster &cluster.owner, // The owner of the cluster share.to_owned(), // The keyshare itself ); + } - // Save all cluster related information - self.state.multi_state.clusters.insert( - &cluster_id, // The id of the cluster - &validator.public_key, // The public key of validator added to the cluster - &cluster.owner, // Owner of the cluster - cluster.to_owned(), // The Cluster and all containing information - ); + // Save all cluster related information + self.state.multi_state.clusters.insert( + &cluster.cluster_id, // The id of the cluster + &validator.public_key, // The public key of validator added to the cluster + &cluster.owner, // Owner of the cluster + cluster.to_owned(), // The Cluster and all containing information + ); + + // Save the metadata for the validators + self.state.multi_state.validator_metadata.insert( + &validator.public_key, // The public key of the validator + &cluster.cluster_id, // The id of the cluster the validator belongs to + &cluster.owner, // The owner of the cluster + validator.to_owned(), // The metadata of the validator + ); - // Save the metadata for the validators - self.state.multi_state.validator_metadata.insert( - &validator.public_key, // The public key of the validator - &cluster_id, // The id of the cluster the validator belongs to - &cluster.owner, // The owner of the cluster - validator.to_owned(), // The metadata of the validator - ); - } Ok(()) } diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index dc6ad24b..0d0455ed 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -11,7 +11,7 @@ use std::time::Duration; use types::{Address, PublicKey}; pub use crate::error::DatabaseError; -use crate::multi_index::{MultiIndexMap, *}; +pub use crate::multi_index::{MultiIndexMap, *}; use crate::sql_operations::{SqlStatement, SQL}; mod cluster_operations; From d2648d15928b6c39b1bdb3714fb179a298c78645 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 23 Dec 2024 15:00:55 +0000 Subject: [PATCH 39/50] clean getters for multi state --- anchor/database/src/cluster_operations.rs | 29 ++++++++------------- anchor/database/src/lib.rs | 26 +++++++++--------- anchor/database/src/state.rs | 23 +++++++++++++--- 
anchor/database/src/tests/utils.rs | 18 ++++--------- anchor/database/src/validator_operations.rs | 18 +++---------- 5 files changed, 52 insertions(+), 62 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 01809457..b243ee5c 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -56,7 +56,7 @@ impl NetworkDatabase { self.state.single_state.clusters.insert(cluster.cluster_id); // Save the keyshare - self.state.multi_state.shares.insert( + self.shares().insert( &validator.public_key, // The validator this keyshare belongs to &cluster.cluster_id, // The id of the cluster &cluster.owner, // The owner of the cluster @@ -65,7 +65,7 @@ impl NetworkDatabase { } // Save all cluster related information - self.state.multi_state.clusters.insert( + self.clusters().insert( &cluster.cluster_id, // The id of the cluster &validator.public_key, // The public key of validator added to the cluster &cluster.owner, // Owner of the cluster @@ -73,7 +73,7 @@ impl NetworkDatabase { ); // Save the metadata for the validators - self.state.multi_state.validator_metadata.insert( + self.metadata().insert( &validator.public_key, // The public key of the validator &cluster.cluster_id, // The id of the cluster the validator belongs to &cluster.owner, // The owner of the cluster @@ -93,9 +93,9 @@ impl NetworkDatabase { ])?; // get and update the cluster if we are a part of it - if let Some(mut cluster) = self.state.multi_state.clusters.get_by(&cluster_id) { + if let Some(mut cluster) = self.clusters().get_by(&cluster_id) { cluster.liquidated = status; - self.state.multi_state.clusters.update(&cluster_id, cluster); + self.clusters().update(&cluster_id, cluster); } Ok(()) @@ -110,23 +110,16 @@ impl NetworkDatabase { .execute(params![validator_pubkey.to_string()])?; // remove the validators share and its metadata - self.state.multi_state.shares.remove(validator_pubkey); + self.shares().remove(validator_pubkey); let metadata = self - .state - .multi_state - .validator_metadata + .metadata() .remove(validator_pubkey) .expect("Data should have existed"); - // if this cluster no longer contains any validators, remove it from the cluster map - if self - .state - .multi_state - .validator_metadata - .get_all_by(&metadata.cluster_id) - .is_none() - { - self.state.multi_state.clusters.remove(&metadata.cluster_id); + // If there are no longer any validators for this cluster, remove it from both the cluster + // multi index map and the cluster membership set + if self.metadata().get_all_by(&metadata.cluster_id).is_none() { + self.clusters().remove(&metadata.cluster_id); self.state .single_state .clusters diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs index 0d0455ed..cd15a5f0 100644 --- a/anchor/database/src/lib.rs +++ b/anchor/database/src/lib.rs @@ -31,36 +31,36 @@ type PoolConn = r2d2::PooledConnection; const POOL_SIZE: u32 = 1; const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5); -type ShareMultiIndexMap = +pub(crate) type ShareMultiIndexMap = MultiIndexMap; -type MetadataMultiIndexMap = +pub(crate) type MetadataMultiIndexMap = MultiIndexMap; -type ClusterMultiIndexMap = +pub(crate) type ClusterMultiIndexMap = MultiIndexMap; // Information that needs to be accessed via multiple different indices #[derive(Debug)] -pub struct MultiState { +struct MultiState { /// All of the shares that belong to us /// Primary: public key of validator. uniquely identifies share /// Secondary: cluster id.
corresponds to a list of shares /// Tertiary: owner of the cluster. corresponds to a list of shares - pub shares: ShareMultiIndexMap, - /// Metadata for validators that delegate to us + shares: ShareMultiIndexMap, + /// Metadata for all validators in the network /// Primary: public key of the validator. uniquely identifies the metadata /// Secondary: cluster id. corresponds to list of metadata for all validators /// Tertiary: owner of the cluster: corresponds to list of metadata for all validators - pub validator_metadata: MetadataMultiIndexMap, - /// All cluster data for each cluster we are a member in + validator_metadata: MetadataMultiIndexMap, + /// All of the clusters in the network /// Primary: cluster id. uniquely identifies a cluster /// Secondary: public key of the validator. uniquely identifies a cluster /// Tertiary: owner of the cluster. uniquely identifies a cluster - pub clusters: ClusterMultiIndexMap, + clusters: ClusterMultiIndexMap, } // General information that can be single index access #[derive(Debug, Default)] -pub struct SingleState { +struct SingleState { /// The ID of our own operator. This is determined via events when the operator is /// registered with the network. Therefore, this may not be available right away if the client /// is running but has not been registered with the network contract yet. @@ -75,8 +75,8 @@ pub struct SingleState { // Container to hold all network state #[derive(Debug)] -pub struct NetworkState { - pub multi_state: MultiState, +struct NetworkState { + multi_state: MultiState, single_state: SingleState, } @@ -87,7 +87,7 @@ pub struct NetworkDatabase { /// The public key of our operator pubkey: Rsa, /// Custom state stores for easy data access - pub state: NetworkState, + state: NetworkState, /// Connection to the database conn_pool: Pool, } diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index bfe45756..499a3ecf 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -193,11 +193,21 @@ impl NetworkState { } } -// Interface for accessing single state data +// Interface for accessing state data impl NetworkDatabase { - /// Get operator data from in-memory store - pub fn get_operator(&self, id: &OperatorId) -> Option { - self.state.single_state.operators.get(id).map(|v| v.clone()) + /// Get a reference to the shares map + pub fn shares(&self) -> &ShareMultiIndexMap { + &self.state.multi_state.shares + } + + /// Get a reference to the validator metadata map + pub fn metadata(&self) -> &MetadataMultiIndexMap { + &self.state.multi_state.validator_metadata + } + + /// Get a reference to the cluster map + pub fn clusters(&self) -> &ClusterMultiIndexMap { + &self.state.multi_state.clusters } /// Get the ID of our Operator if it exists @@ -210,6 +220,11 @@ impl NetworkDatabase { } } + /// Get operator data from in-memory store + pub fn get_operator(&self, id: &OperatorId) -> Option { + self.state.single_state.operators.get(id).map(|v| v.clone()) + } + /// Check if an operator exists pub fn operator_exists(&self, id: &OperatorId) -> bool { self.state.single_state.operators.contains_key(id) diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index 93bbc6a2..ef6415c0 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -370,9 +370,7 @@ pub mod assertions { // Verifies that the cluster is in memory pub fn exists_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { let stored_validator = db - .state - .multi_state - 
.validator_metadata + .metadata() .get_by(&v.public_key) .expect("Metadata should exist"); data(v, &stored_validator); @@ -380,11 +378,7 @@ pub mod assertions { // Verifies that the cluster is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, v: &ValidatorMetadata) { - let stored_validator = db - .state - .multi_state - .validator_metadata - .get_by(&v.public_key); + let stored_validator = db.metadata().get_by(&v.public_key); assert!(stored_validator.is_none()); } @@ -417,9 +411,7 @@ pub mod assertions { pub fn exists_in_memory(db: &NetworkDatabase, c: &Cluster) { assert!(db.member_of_cluster(&c.cluster_id)); let stored_cluster = db - .state - .multi_state - .clusters + .clusters() .get_by(&c.cluster_id) .expect("Cluster should exist"); data(c, &stored_cluster) @@ -428,7 +420,7 @@ pub mod assertions { // Verifies that the cluster is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, cluster_id: ClusterId) { assert!(!db.member_of_cluster(&cluster_id)); - let stored_cluster = db.state.multi_state.clusters.get_by(&cluster_id); + let stored_cluster = db.clusters().get_by(&cluster_id); assert!(stored_cluster.is_none()); } @@ -461,7 +453,7 @@ pub mod assertions { // Verifies that a share is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { - let db_share = db.state.multi_state.shares.get_by(validator_pubkey); + let db_share = db.shares().get_by(validator_pubkey); assert!(db_share.is_none()); } diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index 3bb24755..ab4eb5c5 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -18,12 +18,10 @@ impl NetworkDatabase { ])?; // If we are in the cluster, update the in memory fee recipient for the cluster - if let Some(mut cluster) = self.state.multi_state.clusters.get_by(&owner) { + if let Some(mut cluster) = self.clusters().get_by(&owner) { // update recipient and insert back in to update cluster.fee_recipient = fee_recipient; - self.state - .multi_state - .clusters + self.clusters() .update(&cluster.cluster_id, cluster.to_owned()); } Ok(()) @@ -44,18 +42,10 @@ impl NetworkDatabase { ])?; // If we are an operator for the validator, update the in memory grafitti - if let Some(mut validator) = self - .state - .multi_state - .validator_metadata - .get_by(validator_pubkey) - { + if let Some(mut validator) = self.metadata().get_by(validator_pubkey) { // update graffiti and insert back in to update validator.graffiti = graffiti; - self.state - .multi_state - .validator_metadata - .update(validator_pubkey, validator); + self.metadata().update(validator_pubkey, validator); } Ok(()) } From e69fb3ad9fa0bec8344d1a66cb9bbf60a33e646f Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 23 Dec 2024 15:12:11 +0000 Subject: [PATCH 40/50] rebuild all clusters and share-metadata information upon restart --- anchor/database/src/cluster_operations.rs | 2 +- anchor/database/src/sql_operations.rs | 6 ++---- anchor/database/src/state.rs | 22 ++++++++++++---------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index b243ee5c..4afba451 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -50,7 +50,7 @@ impl NetworkDatabase { // Commit all operations to the db tx.commit()?; - // If we are a member in this cluster, store relevant information + // If 
we are a member in this cluster, store membership and our share if let Some(share) = our_share { // Record that we are a member of this cluster self.state.single_state.clusters.insert(cluster.cluster_id); diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs index 1257d6af..736ddd99 100644 --- a/anchor/database/src/sql_operations.rs +++ b/anchor/database/src/sql_operations.rs @@ -79,8 +79,7 @@ pub(crate) static SQL: LazyLock> = LazyLock: c.faulty, c.liquidated FROM clusters c - JOIN cluster_members cm ON c.cluster_id = cm.cluster_id - WHERE cm.operator_id = ?", + JOIN cluster_members cm ON c.cluster_id = cm.cluster_id", ); m.insert( SqlStatement::GetClusterMembers, @@ -123,8 +122,7 @@ pub(crate) static SQL: LazyLock> = LazyLock: s.encrypted_key, s.operator_id FROM validators v - JOIN shares s ON v.validator_pubkey = s.validator_pubkey - WHERE s.operator_id = ?1", + JOIN shares s ON v.validator_pubkey = s.validator_pubkey", ); m }); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 499a3ecf..e9477073 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -48,8 +48,8 @@ impl NetworkState { // 2) ClusterId -> Vec<(Share, ValidatorMetadata)> // This simplifies data reconstruction and makes it easy to add more customized stores in the future let operators = Self::fetch_operators(&conn)?; - let share_validator = Self::fetch_shares_and_validators(&conn, id)?; - let clusters = Self::fetch_clusters(&conn, id)?; + let share_validator = Self::fetch_shares_and_validators(&conn)?; + let clusters = Self::fetch_clusters(&conn)?; // Second phase: Populate all in memory stores with data; let shares_multi: ShareMultiIndexMap = MultiIndexMap::new(); @@ -81,7 +81,13 @@ impl NetworkState { .get(&cluster_id) .expect("Cluster should exist") .owner; - shares_multi.insert(&metadata.public_key, &cluster_id, &cluster_owner, share); + + // if the share is owned by this operator, save it + if share.operator_id == id { + shares_multi.insert(&metadata.public_key, &cluster_id, &cluster_owner, share); + } + + // save all validator metadata metadata_multi.insert( &metadata.public_key, &cluster_id, @@ -141,11 +147,10 @@ impl NetworkState { // guarantee that they pair up correctly fn fetch_shares_and_validators( conn: &PoolConn, - operator_id: OperatorId, ) -> Result, DatabaseError> { let mut stmt = conn.prepare(SQL[&SqlStatement::GetShareAndValidator])?; let data = stmt - .query_map([*operator_id], |row| { + .query_map([], |row| { let metadata = ValidatorMetadata::try_from(row)?; let share = Share::try_from(row)?; Ok((metadata.cluster_id, (share, metadata))) @@ -155,13 +160,10 @@ impl NetworkState { } // Fetch and transform cluster data for a specific operator - fn fetch_clusters( - conn: &PoolConn, - operator_id: OperatorId, - ) -> Result, DatabaseError> { + fn fetch_clusters(conn: &PoolConn) -> Result, DatabaseError> { let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllClusters])?; let clusters = stmt - .query_map([*operator_id], |row| { + .query_map([], |row| { let cluster_id = ClusterId(row.get(0)?); println!("got here"); From 431d4eb56c6e4233c32360744466db06fe74102a Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 23 Dec 2024 15:12:38 +0000 Subject: [PATCH 41/50] remove print --- anchor/database/src/state.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index e9477073..28a65778 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -165,7 
+165,6 @@ impl NetworkState { let clusters = stmt .query_map([], |row| { let cluster_id = ClusterId(row.get(0)?); - println!("got here"); // Get all of the members for this cluster let cluster_members = Self::fetch_cluster_members(conn, cluster_id)?; From 43b2e7121dba05547fb3f1f16879c382d4cffcaa Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Tue, 24 Dec 2024 16:30:33 +0000 Subject: [PATCH 42/50] fix state reconstruction --- anchor/common/ssv_types/src/share.rs | 2 + .../common/ssv_types/src/sql_conversions.rs | 25 ++-- anchor/database/src/cluster_operations.rs | 1 + anchor/database/src/multi_index.rs | 5 + anchor/database/src/sql_operations.rs | 96 ++++++++------ anchor/database/src/state.rs | 121 +++++++++++------- anchor/database/src/tests/cluster_tests.rs | 6 +- anchor/database/src/tests/state_tests.rs | 53 ++++++++ anchor/database/src/tests/utils.rs | 37 +++++- anchor/database/src/validator_operations.rs | 40 +++--- 10 files changed, 262 insertions(+), 124 deletions(-) diff --git a/anchor/common/ssv_types/src/share.rs b/anchor/common/ssv_types/src/share.rs index a7180b54..77fefdad 100644 --- a/anchor/common/ssv_types/src/share.rs +++ b/anchor/common/ssv_types/src/share.rs @@ -4,6 +4,8 @@ use types::PublicKey; /// One of N shares of a split validator key. #[derive(Debug, Clone)] pub struct Share { + /// Public Key of the validator + pub validator_pubkey: PublicKey, /// Operator this share belongs to pub operator_id: OperatorId, /// Cluster the operator who owns this share belongs to diff --git a/anchor/common/ssv_types/src/sql_conversions.rs b/anchor/common/ssv_types/src/sql_conversions.rs index 37a7422b..20ffe847 100644 --- a/anchor/common/ssv_types/src/sql_conversions.rs +++ b/anchor/common/ssv_types/src/sql_conversions.rs @@ -83,6 +83,7 @@ impl TryFrom<(&Row<'_>, Vec)> for Cluster { } } +// Conversion from SQL row to a ClusterMember impl TryFrom<&Row<'_>> for ClusterMember { type Error = rusqlite::Error; @@ -101,7 +102,6 @@ impl TryFrom<&Row<'_>> for ClusterMember { } // Conversion from SQL row to ValidatorMetadata -// Intertwined with Share conversion via "GetShareAndValidator" impl TryFrom<&Row<'_>> for ValidatorMetadata { type Error = SqlError; fn try_from(row: &Row) -> Result { @@ -129,23 +129,28 @@ impl TryFrom<&Row<'_>> for ValidatorMetadata { } // Conversion from SQL row into a Share -// Intertwined with Metadata conversion via "GetShareAndValidator" impl TryFrom<&Row<'_>> for Share { type Error = rusqlite::Error; fn try_from(row: &Row) -> Result { - // Get Share PublicKey from column 4 - let share_pubkey_str = row.get::<_, String>(4)?; + // Get Share PublicKey from column 0 + let share_pubkey_str = row.get::<_, String>(0)?; let share_pubkey = PublicKey::from_str(&share_pubkey_str) - .map_err(|e| from_sql_error(4, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; + .map_err(|e| from_sql_error(0, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; + + // Get the encrypted private key from column 1 + let encrypted_private_key: [u8; 256] = row.get(1)?; - // Get the encrypted private key from column 5 - let encrypted_private_key: [u8; 256] = row.get(5)?; + // Get the OperatorId from column 2 and ClusterId from column 3 + let operator_id = OperatorId(row.get(2)?); + let cluster_id = ClusterId(row.get(3)?); - // Get the OperatorId from column 6 and ClusterId from column 1 - let operator_id = OperatorId(row.get(6)?); - let cluster_id = ClusterId(row.get(1)?); + // Get the Validator PublicKey from column 4 + let validator_pubkey_str = row.get::<_, String>(4)?; + let 
validator_pubkey = PublicKey::from_str(&validator_pubkey_str) + .map_err(|e| from_sql_error(4, Type::Text, Error::new(ErrorKind::InvalidInput, e)))?; Ok(Share { + validator_pubkey, operator_id, cluster_id, share_pubkey, diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 4afba451..5b310169 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -79,6 +79,7 @@ impl NetworkDatabase { &cluster.owner, // The owner of the cluster validator.to_owned(), // The metadata of the validator ); + println!("{:?}", validator); Ok(()) } diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs index a7dc846f..a8794566 100644 --- a/anchor/database/src/multi_index.rs +++ b/anchor/database/src/multi_index.rs @@ -118,6 +118,11 @@ where } } + /// Number of entries in the primary map + pub fn length(&self) -> usize { + self.maps.primary.len() + } + /// Insert a new value and associated keys into the map pub fn insert(&self, k1: &K1, k2: &K2, k3: &K3, v: V) { // Insert into primary map first diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs index 736ddd99..ecbceef5 100644 --- a/anchor/database/src/sql_operations.rs +++ b/anchor/database/src/sql_operations.rs @@ -4,34 +4,36 @@ use std::sync::LazyLock; // Wrappers around various SQL statements used for interacting with the db #[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)] pub(crate) enum SqlStatement { - InsertOperator, - DeleteOperator, - GetOperatorId, - GetAllOperators, + InsertOperator, // Insert a new Operator in the database + DeleteOperator, // Delete an Operator from the database + GetOperatorId, // Get the ID of this operator from its public key + GetAllOperators, // Get all of the Operators in the database - InsertCluster, - InsertClusterMember, - UpdateClusterStatus, - UpdateClusterFaulty, - DeleteCluster, - GetAllClusters, - GetClusterMembers, + InsertCluster, // Insert a new Cluster into the database + InsertClusterMember, // Insert a new Cluster Member into the database + UpdateClusterStatus, // Update the active status of the cluster + UpdateClusterFaulty, // Update the number of faulty Operators in the cluster + GetAllClusters, // Get all Clusters for state reconstruction + GetClusterMembers, // Get all Cluster Members for state reconstruction - DeleteValidator, - InsertShare, - InsertValidator, - UpdateFeeRecipient, - SetGraffiti, - SetValidatorIndex, + InsertValidator, // Insert a Validator into the database + DeleteValidator, // Delete a Validator from the database + GetAllValidators, // Get all Validators for state reconstruction - UpdateBlockNumber, - GetBlockNumber, + InsertShare, // Insert a KeyShare into the database + GetShares, // Get the relevant keyshare for a validator - GetShareAndValidator, + UpdateFeeRecipient, // Update the fee recipient address for a cluster + SetGraffiti, // Update the Graffiti for a validator + + UpdateBlockNumber, // Update the last block that the database has processed + GetBlockNumber, // Get the last block that the database has processed } pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock::new(|| { let mut m = HashMap::new(); + + // Operator m.insert( SqlStatement::InsertOperator, "INSERT INTO operators (operator_id, public_key, owner_address) VALUES (?1, ?2, ?3)", ); @@ -45,30 +47,23 @@ pub(crate) static SQL: LazyLock> = LazyLock: "SELECT operator_id FROM operators WHERE public_key = ?1", ); m.insert(SqlStatement::GetAllOperators, "SELECT *
FROM operators"); + + // Cluster m.insert( SqlStatement::InsertCluster, "INSERT OR IGNORE INTO clusters (cluster_id, owner, fee_recipient) VALUES (?1, ?2, ?3)", ); - m.insert( - SqlStatement::UpdateClusterStatus, - "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", - ); - m.insert( - SqlStatement::UpdateClusterFaulty, - "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", - ); m.insert( SqlStatement::InsertClusterMember, "INSERT OR IGNORE INTO cluster_members (cluster_id, operator_id) VALUES (?1, ?2)", ); m.insert( - SqlStatement::DeleteCluster, - "DELETE FROM clusters WHERE cluster_id = ?1", + SqlStatement::UpdateClusterStatus, + "UPDATE clusters SET liquidated = ?1 WHERE cluster_id = ?2", ); - m.insert( - SqlStatement::DeleteValidator, - "DELETE from validators WHERE validator_pubkey = ?1", + SqlStatement::UpdateClusterFaulty, + "UPDATE clusters SET faulty = ?1 WHERE cluster_id = ?2", ); m.insert( SqlStatement::GetAllClusters, @@ -85,12 +80,32 @@ pub(crate) static SQL: LazyLock> = LazyLock: SqlStatement::GetClusterMembers, "SELECT operator_id FROM cluster_members WHERE cluster_id = ?1", ); - m.insert(SqlStatement::InsertShare, - "INSERT INTO shares (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) VALUES (?1, ?2, ?3, ?4, ?5)"); + + // Validator m.insert( SqlStatement::InsertValidator, "INSERT INTO validators (validator_pubkey, cluster_id, validator_index, graffiti) VALUES (?1, ?2, ?3, ?4)", ); + m.insert( + SqlStatement::DeleteValidator, + "DELETE from validators WHERE validator_pubkey = ?1", + ); + m.insert(SqlStatement::GetAllValidators, "SELECT * FROM validators"); + + // Shares + m.insert( + SqlStatement::InsertShare, + "INSERT INTO shares + (validator_pubkey, cluster_id, operator_id, share_pubkey, encrypted_key) + VALUES + (?1, ?2, ?3, ?4, ?5)", + ); + m.insert( + SqlStatement::GetShares, + "SELECT share_pubkey, encrypted_key, operator_id, cluster_id, validator_pubkey FROM shares WHERE operator_id = ?1" + ); + + // Misc Data m.insert( SqlStatement::UpdateFeeRecipient, "UPDATE clusters SET fee_recipient = ?1 WHERE owner = ?2", ); @@ -99,10 +114,8 @@ pub(crate) static SQL: LazyLock> = LazyLock: SqlStatement::SetGraffiti, "UPDATE validators SET graffiti = ?1 WHERE validator_pubkey = ?2", ); - m.insert( - SqlStatement::SetValidatorIndex, - "UPDATE validators SET validator_index = ?1 WHERE validator_pubkey = ?2", - ); + + // Blocks m.insert( SqlStatement::UpdateBlockNumber, "UPDATE block SET block_number = ?1", ); @@ -111,8 +124,10 @@ pub(crate) static SQL: LazyLock> = LazyLock: SqlStatement::GetBlockNumber, "SELECT block_number FROM block", ); + + /* m.insert( - SqlStatement::GetShareAndValidator, + SqlStatement::GetValidatorAndShares, "SELECT v.validator_pubkey, v.cluster_id, v.validator_index, v.graffiti, s.share_pubkey, s.encrypted_key, s.operator_id FROM validators v JOIN shares s ON v.validator_pubkey = s.validator_pubkey", ); + */ m }); diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs index 28a65778..a178258a 100644 --- a/anchor/database/src/state.rs +++ b/anchor/database/src/state.rs @@ -45,11 +45,13 @@ impl NetworkState { // First Phase: Fetch data from the database // Two main data structures for state reconstruction // 1) ClusterId -> Cluster - // 2) ClusterId -> Vec<(Share, ValidatorMetadata)> + // 2) ClusterId -> Vec<ValidatorMetadata> + // 3) ClusterId -> Shares // This simplifies data reconstruction and makes it easy to add more customized stores in the future let operators = Self::fetch_operators(&conn)?; - let share_validator =
Self::fetch_shares_and_validators(&conn)?; - let clusters = Self::fetch_clusters(&conn)?; + let cluster_map = Self::fetch_clusters(&conn)?; + let validator_map = Self::fetch_validators(&conn)?; + let share_map = Self::fetch_shares(&conn, id)?; // Second phase: Populate all in memory stores with data; let shares_multi: ShareMultiIndexMap = MultiIndexMap::new(); @@ -59,42 +61,46 @@ impl NetworkState { id: AtomicU64::new(*id), last_processed_block: AtomicU64::new(last_processed_block), operators: DashMap::from_iter(operators), - clusters: DashSet::from_iter(clusters.keys().copied()), + clusters: DashSet::from_iter(cluster_map.keys().copied()), }; - // Insert all of the cluster information - clusters.iter().for_each(|(cluster_id, cluster)| { - let validator_key = share_validator + // Populate all multi-index maps in a single pass through clusters + for (cluster_id, cluster) in &cluster_map { + let validators = validator_map .get(cluster_id) - .expect("Validator should exist") - .1 - .public_key - .clone(); - cluster_multi.insert(cluster_id, &validator_key, &cluster.owner, cluster.clone()); - }); - - // Insert all of the share and validator_metadata - share_validator - .into_iter() - .for_each(|(cluster_id, (share, metadata))| { - let cluster_owner = clusters - .get(&cluster_id) - .expect("Cluster should exist") - .owner; - - // if the share is owned by this operator, save it - if share.operator_id == id { - shares_multi.insert(&metadata.public_key, &cluster_id, &cluster_owner, share); - } + .expect("Validator for cluster must exist"); - // save all validator metadata + // Process each validator and its associated data + for validator in validators { + // Insert cluster and validator metadata + cluster_multi.insert( + cluster_id, + &validator.public_key, + &cluster.owner, + cluster.clone(), + ); metadata_multi.insert( - &metadata.public_key, - &cluster_id, - &cluster_owner, - metadata.to_owned(), + &validator.public_key, + cluster_id, + &cluster.owner, + validator.clone(), ); - }); + + // Process shares if they exist for this cluster + if let Some(shares) = share_map.get(cluster_id) { + for share in shares { + if share.validator_pubkey == validator.public_key { + shares_multi.insert( + &validator.public_key, + cluster_id, + &cluster.owner, + share.clone(), + ); + } + } + } + } + } // Return fully constructed state Ok(Self { @@ -143,20 +149,23 @@ impl NetworkState { operators.collect() } - // Fetch all of the validators and their associated share. Fetched together so that we can - // guarantee that they pair up correctly - fn fetch_shares_and_validators( + // Fetch and transform validator data from the database + fn fetch_validators( conn: &PoolConn, - ) -> Result, DatabaseError> { - let mut stmt = conn.prepare(SQL[&SqlStatement::GetShareAndValidator])?; - let data = stmt - .query_map([], |row| { - let metadata = ValidatorMetadata::try_from(row)?; - let share = Share::try_from(row)?; - Ok((metadata.cluster_id, (share, metadata))) - })? - .map(|result| result.map_err(DatabaseError::from)); - data.collect::, _>>() + ) -> Result>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllValidators])?; + let validators = stmt + .query_map([], |row| ValidatorMetadata::try_from(row))? 
+ .map(|result| result.map_err(DatabaseError::from)) + .collect::<Result<Vec<_>, _>>()?; + + let mut map = HashMap::new(); + for validator in validators { + map.entry(validator.cluster_id) + .or_insert_with(Vec::new) + .push(validator); + } + Ok(map) } // Fetch and transform cluster data for a specific operator @@ -192,6 +201,26 @@ impl NetworkState { members.collect() } + + // Fetch the shares that this operator owns + fn fetch_shares( + conn: &PoolConn, + id: OperatorId, + ) -> Result<HashMap<ClusterId, Vec<Share>>, DatabaseError> { + let mut stmt = conn.prepare(SQL[&SqlStatement::GetShares])?; + let shares = stmt + .query_map([*id], |row| Share::try_from(row))? + .map(|result| result.map_err(DatabaseError::from)) + .collect::<Result<Vec<_>, _>>()?; + + let mut map = HashMap::new(); + for share in shares { + map.entry(share.cluster_id) + .or_insert_with(Vec::new) + .push(share); + } + Ok(map) + } } // Interface for accessing state data diff --git a/anchor/database/src/tests/cluster_tests.rs b/anchor/database/src/tests/cluster_tests.rs index 62550f7f..33c1755a 100644 --- a/anchor/database/src/tests/cluster_tests.rs +++ b/anchor/database/src/tests/cluster_tests.rs @@ -61,7 +61,11 @@ mod cluster_database_tests { let fixture = TestFixture::new_empty(); let cluster = generators::cluster::random(4); let metadata = generators::validator::random_metadata(cluster.cluster_id); - let shares = vec![generators::share::random(cluster.cluster_id, OperatorId(1))]; + let shares = vec![generators::share::random( + cluster.cluster_id, + OperatorId(1), + &fixture.validator.public_key, + )]; fixture .db .insert_validator(cluster, metadata, shares) diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs index efc14c0f..34652671 100644 --- a/anchor/database/src/tests/state_tests.rs +++ b/anchor/database/src/tests/state_tests.rs @@ -39,6 +39,59 @@ mod state_database_tests { assertions::validator::exists_in_memory(&fixture.db, &fixture.validator); } + #[test] + // Test that a share this operator owns is in memory after restart + fn test_shares_after_restart() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + + // drop and recreate database + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // Confirm share data, there should be one share in memory for this operator + assert!(fixture.db.shares().length() == 1); + let pk = &fixture.validator.public_key; + let share = fixture + .db + .shares() + .get_by(pk) + .expect("The share should exist"); + assertions::share::exists_in_memory(&fixture.db, pk, &share); + } + + #[test] + // Test that we have multiple validators in memory after restart + fn test_multiple_entries() { + // Create new test fixture with populated DB + let mut fixture = TestFixture::new(); + + // Generate new validator information + let cluster = fixture.cluster; + let new_validator = generators::validator::random_metadata(cluster.cluster_id); + let mut shares: Vec<Share> = Vec::new(); + fixture.operators.iter().for_each(|op| { + let share = + generators::share::random(cluster.cluster_id, op.id, &new_validator.public_key); + shares.push(share); + }); + fixture + .db + .insert_validator(cluster, new_validator, shares) + .expect("Insert should not fail"); + + // drop and recreate database + drop(fixture.db); + fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey) + .expect("Failed to create database"); + + // assert that there are two validators, one cluster, and 2 shares in memory
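// Both validators belong to the same cluster, so after the restart the
// metadata map holds one entry per validator, the share map holds the single
// share this operator owns for each validator, and the cluster map holds the
// one shared cluster.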
assert!(fixture.db.metadata().length() == 2); + assert!(fixture.db.shares().length() == 2); + assert!(fixture.db.clusters().length() == 1); + } + #[test] // Test that you can update and retrieve a block number fn test_block_number() { diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs index ef6415c0..978df93b 100644 --- a/anchor/database/src/tests/utils.rs +++ b/anchor/database/src/tests/utils.rs @@ -25,8 +25,8 @@ pub struct TestFixture { } impl TestFixture { - // Generate a database that is populated with a full cluster. We are a member of the cluster so - // the in state store will also be populated + // Generate a database that is populated with a full cluster. This operator is a part of the + // cluster, so membership data should be saved pub fn new() -> Self { // generate the operators and pick the first one to be us let operators: Vec<Operator> = (0..DEFAULT_NUM_OPERATORS) @@ -41,21 +41,33 @@ impl TestFixture { let temp_dir = TempDir::new().expect("Failed to create temporary directory"); let db_path = temp_dir.path().join("test.db"); let db = NetworkDatabase::new(&db_path, &us).expect("Failed to create DB"); + + // Insert all of the operators operators.iter().for_each(|op| { db.insert_operator(op).expect("Failed to insert operator"); }); - // Build cluster, shares, and validator data + // Build a cluster with all of the operators previously inserted let cluster = generators::cluster::with_operators(&operators); + + // Generate one validator that will delegate to this cluster let validator = generators::validator::random_metadata(cluster.cluster_id); + + // Generate shares for the validator. Each operator will have one share let shares: Vec<Share> = operators .iter() - .map(|op| generators::share::random(cluster.cluster_id, op.id)) + .map(|op| generators::share::random(cluster.cluster_id, op.id, &validator.public_key)) .collect(); db.insert_validator(cluster.clone(), validator.clone(), shares.clone()) .expect("Failed to insert cluster"); + // End state: + // There are DEFAULT_NUM_OPERATORS operators in the network + // There is a single cluster with a single validator + // The operators acting on behalf of the validator are all of the operators in the network + // Each operator has a piece of the keyshare for the validator + Self { db, cluster, @@ -145,8 +157,9 @@ pub mod generators { pub mod share { use super::*; // Generate a random keyshare - pub fn random(cluster_id: ClusterId, operator_id: OperatorId) -> Share { + pub fn random(cluster_id: ClusterId, operator_id: OperatorId, pk: &PublicKey) -> Share { Share { + validator_pubkey: pk.clone(), operator_id, cluster_id, share_pubkey: pubkey::random(), @@ -250,6 +263,7 @@ pub mod queries { let operator_id = OperatorId(row.get(3)?); Ok(Share { + validator_pubkey: pubkey.clone(), operator_id, cluster_id, share_pubkey, @@ -451,10 +465,19 @@ pub mod assertions { assert_eq!(s1.share_pubkey, s2.share_pubkey); } + // Verifies that a share is in memory + pub fn exists_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey, s: &Share) { + let stored_share = db + .shares() + .get_by(validator_pubkey) + .expect("Share should exist"); + data(s, &stored_share); + } + // Verifies that a share is not in memory pub fn exists_not_in_memory(db: &NetworkDatabase, validator_pubkey: &PublicKey) { let stored_share = db.shares().get_by(validator_pubkey); assert!(stored_share.is_none()); } // Verifies that all of the shares for a validator are in
the database diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs index ab4eb5c5..55d481e0 100644 --- a/anchor/database/src/validator_operations.rs +++ b/anchor/database/src/validator_operations.rs @@ -10,16 +10,16 @@ impl NetworkDatabase { owner: Address, fee_recipient: Address, ) -> Result<(), DatabaseError> { - let conn = self.connection()?; - conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])? - .execute(params![ - fee_recipient.to_string(), // new fee recipient address for entire cluster - owner.to_string() // owner of the cluster - ])?; - - // If we are in the cluster, update the in memory fee recipient for the cluster + // Make sure the cluster exists by getting the in memory entry if let Some(mut cluster) = self.clusters().get_by(&owner) { - // update recipient and insert back in to update + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])? + .execute(params![ + fee_recipient.to_string(), // New fee recipient address for entire cluster + owner.to_string() // Owner of the cluster + ])?; + + // Update recipient address on the entry and update it in the map cluster.fee_recipient = fee_recipient; self.clusters() .update(&cluster.cluster_id, cluster.to_owned()); @@ -27,23 +27,23 @@ impl NetworkDatabase { Ok(()) } - /// Update the graffiti for a validator + /// Update the Graffiti for a Validator pub fn update_graffiti( &self, validator_pubkey: &PublicKey, graffiti: Graffiti, ) -> Result<(), DatabaseError> { - // Update the database - let conn = self.connection()?; - conn.prepare_cached(SQL[&SqlStatement::SetGraffiti])? - .execute(params![ - graffiti.0.as_slice(), // new graffiti - validator_pubkey.to_string() // the public key of the validator - ])?; - - // If we are an operator for the validator, update the in memory graffiti + // Make sure this validator exists by getting the in memory entry if let Some(mut validator) = self.metadata().get_by(validator_pubkey) { - // update graffiti and insert back in to update + // Update the database + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::SetGraffiti])?
+                .execute(params![
+                    graffiti.0.as_slice(), // New graffiti
+                    validator_pubkey.to_string() // The public key of the validator
+                ])?;
+
+            // Update the Graffifi field on the entry and update it in the map
             validator.graffiti = graffiti;
             self.metadata().update(validator_pubkey, validator);
         }

From 5b653dd620d7bc571b83fb990c4f8ca6c1e24f94 Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Tue, 24 Dec 2024 16:59:03 +0000
Subject: [PATCH 43/50] remove print

---
 anchor/database/src/cluster_operations.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs
index 5b310169..4afba451 100644
--- a/anchor/database/src/cluster_operations.rs
+++ b/anchor/database/src/cluster_operations.rs
@@ -79,7 +79,6 @@ impl NetworkDatabase {
             &cluster.owner,       // The owner of the cluster
             validator.to_owned(), // The metadata of the validator
         );
-        println!("{:?}", validator);
         Ok(())
     }

From a147516dfa40da52c7ac3a20204af8459d7628fd Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Thu, 2 Jan 2025 21:48:13 +0000
Subject: [PATCH 44/50] error msg fix

---
 anchor/database/src/operator_operations.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index cf2ac09c..665e1aec 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -57,7 +57,7 @@ impl NetworkDatabase {
         // make sure that this operator exists
         if !self.operator_exists(&id) {
             return Err(DatabaseError::NotFound(format!(
-                "Operator with id {} already in database",
+                "Operator with id {} not in database",
                 *id
             )));
         }

From ebd79ab0e275b4b91ca4b632e00a22176d0fc13a Mon Sep 17 00:00:00 2001
From: Zacholme7
Date: Fri, 3 Jan 2025 14:51:46 +0000
Subject: [PATCH 45/50] merge & update

---
 Cargo.lock | 207 +++++++++++++++++++++++++++--------------------------
 1 file changed, 104 insertions(+), 103 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index ba2b93f0..e5b03cb4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -169,9 +169,9 @@

 [[package]]
 name = "alloy-primitives"
-version = "0.8.15"
+version = "0.8.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6259a506ab13e1d658796c31e6e39d2e2ee89243bcc505ddc613b35732e0a430"
+checksum = "0540fd0355d400b59633c27bd4b42173e59943f28e9d3376b77a24771d432d04"
 dependencies = [
  "alloy-rlp",
  "arbitrary",
@@ -218,7 +218,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.90",
+ "syn 2.0.94",
 ]

 [[package]]
@@ -305,9 +305,9 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.94"
+version = "1.0.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7"
+checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"

 [[package]]
 name = "arbitrary"
@@ -493,7 +493,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.90",
+ "syn 2.0.94",
  "synstructure",
 ]

@@ -505,7 +505,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.90",
+ "syn 2.0.94",
 ]

 [[package]]
@@ -557,13 +557,13 @@ dependencies = [

 [[package]]
 name = "async-trait"
-version = "0.1.83"
+version = "0.1.84"
 source =
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -598,7 +598,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -896,7 +896,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "arbitrary", @@ -1027,9 +1027,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.5" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "jobserver", "libc", @@ -1136,7 +1136,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1165,7 +1165,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "clap", @@ -1229,7 +1229,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1246,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "quote", "syn 1.0.109", @@ -1537,7 +1537,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1585,7 +1585,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1607,7 +1607,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1798,7 +1798,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1809,7 +1809,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -1830,7 +1830,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", "unicode-xid", ] @@ -1868,7 +1868,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2011,7 +2011,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -2172,7 +2172,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -2301,7 +2301,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2323,7 +2323,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2391,7 +2391,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "bytes", "discv5 0.9.0", @@ -2558,7 +2558,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -2806,7 +2806,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2972,7 +2972,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -3110,14 +3110,14 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gossipsub" @@ -3152,7 +3152,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "async-channel", "asynchronous-codec", @@ -3770,7 +3770,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 
2.0.94", ] [[package]] @@ -3905,7 +3905,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -3949,7 +3949,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "bytes", ] @@ -4145,7 +4145,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "arbitrary", "c-kzg", @@ -4582,7 +4582,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -4785,7 +4785,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4843,7 +4843,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "git-version", "target_info", @@ -4922,7 +4922,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4970,7 +4970,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "fnv", ] @@ -5046,7 +5046,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5088,7 +5088,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "prometheus", ] @@ -5537,7 +5537,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -5797,7 +5797,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" dependencies = [ "memchr", - "thiserror 
2.0.8", + "thiserror 2.0.9", "ucd-trie", ] @@ -5818,7 +5818,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -5930,7 +5930,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6065,7 +6065,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -6096,7 +6096,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -6191,7 +6191,7 @@ dependencies = [ "rustc-hash 2.1.0", "rustls 0.23.20", "socket2 0.5.8", - "thiserror 2.0.8", + "thiserror 2.0.9", "tokio", "tracing", ] @@ -6210,7 +6210,7 @@ dependencies = [ "rustls 0.23.20", "rustls-pki-types", "slab", - "thiserror 2.0.8", + "thiserror 2.0.9", "tinyvec", "tracing", "web-time", @@ -6232,9 +6232,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -6817,9 +6817,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "rusty-fork" @@ -6858,7 +6858,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" [[package]] name = "salsa20" @@ -6890,7 +6890,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -7032,7 +7032,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "serde", "url", @@ -7040,9 +7040,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -7059,20 +7059,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ "itoa", "memchr", @@ -7098,7 +7098,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -7654,7 +7654,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7674,9 +7674,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" dependencies = [ "proc-macro2", "quote", @@ -7703,7 +7703,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -7783,7 +7783,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "async-channel", "futures", @@ -7797,12 +7797,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix 0.38.42", "windows-sys 0.59.0", @@ -7841,7 +7842,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "quote", "syn 1.0.109", @@ -7858,11 +7859,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.8", + "thiserror-impl 2.0.9", ] [[package]] @@ -7873,18 +7874,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] name = "thiserror-impl" -version = "2.0.8" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -8025,7 +8026,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -8111,7 +8112,7 @@ checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", - "winnow 0.6.20", + "winnow 0.6.21", ] [[package]] @@ -8188,7 +8189,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -8269,7 +8270,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -8356,7 +8357,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8446,9 +8447,9 @@ checksum = "ccb97dac3243214f8d8507998906ca3e2e0b900bf9bf4870477f125b82e68f6e" [[package]] name = "unicase" -version = "2.8.0" +version = "2.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" @@ -8534,7 +8535,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#42c64a2744759b7a0ef9852b0e8caf3b3cb4e7db" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -8738,7 +8739,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", "wasm-bindgen-shared", ] @@ -8773,7 +8774,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9132,9 +9133,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "e6f5bb5257f2407a5425c6e749bfd9692192a73e70a6060516ac04f889087d68" dependencies = [ "memchr", ] @@ -9280,7 +9281,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", "synstructure", ] @@ -9302,7 +9303,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -9322,7 +9323,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", "synstructure", ] @@ -9344,7 +9345,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] @@ -9366,7 +9367,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.94", ] [[package]] From c8eb685f9a4e1b87107432049eeb7f7dd9aac947 Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Fri, 3 Jan 2025 15:24:30 +0000 Subject: [PATCH 46/50] spelling and formatting --- Cargo.lock | 6 ++-- anchor/database/src/cluster_operations.rs | 16 +++++----- anchor/database/src/lib.rs | 35 +++++++++++---------- anchor/database/src/operator_operations.rs | 18 +++++------ anchor/database/src/sql_operations.rs | 17 +--------- anchor/database/src/state.rs | 18 +++++------ anchor/database/src/tests/utils.rs | 4 +-- anchor/database/src/validator_operations.rs | 7 +++-- 8 files changed, 55 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e5b03cb4..19eac3e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8112,7 +8112,7 @@ checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ "indexmap", "toml_datetime", - "winnow 0.6.21", + "winnow 0.6.22", ] [[package]] @@ -9133,9 +9133,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.21" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6f5bb5257f2407a5425c6e749bfd9692192a73e70a6060516ac04f889087d68" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index 4afba451..addf7965 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -6,7 +6,8 @@ use types::PublicKey; /// Implements all cluster related functionality on the database impl NetworkDatabase { - /// Inserts a new cluster into the database + /// Inserts a new validator into the database. A new cluster will be created if this is the + /// first validator for the cluster pub fn insert_validator( &self, cluster: Cluster, @@ -16,7 +17,7 @@ impl NetworkDatabase { let mut conn = self.connection()?; let tx = conn.transaction()?; - // Insert the top level cluster data and associated validator metadata + // Insert the top level cluster data if it does not exist, and the associated validator metadata tx.prepare_cached(SQL[&SqlStatement::InsertCluster])? .execute(params![ *cluster.cluster_id, // cluster id @@ -31,7 +32,7 @@ impl NetworkDatabase { validator.graffiti.0.as_slice(), // graffiti ])?; - // Records our shares if one belongs to use + // Record shares if one belongs to the current operator let mut our_share = None; let own_id = self.state.single_state.id.load(Ordering::Relaxed); @@ -41,7 +42,7 @@ impl NetworkDatabase { our_share = Some(share); } - // insert the cluster member and the share + // Insert the cluster member and the share tx.prepare_cached(SQL[&SqlStatement::InsertClusterMember])? .execute(params![*share.cluster_id, *share.operator_id])?; self.insert_share(&tx, share, &validator.public_key) @@ -88,11 +89,11 @@ impl NetworkDatabase { let conn = self.connection()?; conn.prepare_cached(SQL[&SqlStatement::UpdateClusterStatus])? 
            .execute(params![
-                status, // status of the cluster (liquidated or active)
+                status, // status of the cluster (liquidated = false, active = true)
                 *cluster_id // Id of the cluster
             ])?;

-        // get and update the cluster if we are a part of it
+        // Update in memory status of cluster
         if let Some(mut cluster) = self.clusters().get_by(&cluster_id) {
             cluster.liquidated = status;
             self.clusters().update(&cluster_id, cluster);
@@ -105,11 +106,12 @@ impl NetworkDatabase {
     /// data for this validator. If this validator is the last one in the cluster, the cluster
     /// and all corresponding cluster members will also be removed
     pub fn delete_validator(&self, validator_pubkey: &PublicKey) -> Result<(), DatabaseError> {
+        // Remove from database
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::DeleteValidator])?
             .execute(params![validator_pubkey.to_string()])?;

-        // remove the validators share and its metadata
+        // Remove from in memory
         self.shares().remove(validator_pubkey);
         let metadata = self
             .metadata()

diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index cd15a5f0..3fe928f8 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -26,35 +26,36 @@ mod validator_operations;
 #[cfg(test)]
 mod tests;

-type Pool = r2d2::Pool;
-type PoolConn = r2d2::PooledConnection;
 const POOL_SIZE: u32 = 1;
 const CONNECTION_TIMEOUT: Duration = Duration::from_secs(5);

+type Pool = r2d2::Pool;
+type PoolConn = r2d2::PooledConnection;
+
+/// All of the shares that belong to the current operator
+/// Primary: public key of validator. uniquely identifies share
+/// Secondary: cluster id. corresponds to a list of shares
+/// Tertiary: owner of the cluster. corresponds to a list of shares
 pub(crate) type ShareMultiIndexMap = MultiIndexMap;

+/// Metadata for all validators in the network
+/// Primary: public key of the validator. uniquely identifies the metadata
+/// Secondary: cluster id. corresponds to list of metadata for all validators
+/// Tertiary: owner of the cluster. corresponds to list of metadata for all validators
 pub(crate) type MetadataMultiIndexMap = MultiIndexMap;

+/// All of the clusters in the network
+/// Primary: cluster id. uniquely identifies a cluster
+/// Secondary: public key of the validator. uniquely identifies a cluster
+/// Tertiary: owner of the cluster. uniquely identifies a cluster
 pub(crate) type ClusterMultiIndexMap = MultiIndexMap;

-// Information that needs to be accesses via multiple different indicies
+// Information that needs to be accessed via multiple different indices
 #[derive(Debug)]
 struct MultiState {
-    /// All of the shares that belong to use
-    /// Primary: public key of validator. uniquely identifies share
-    /// Secondary: cluster id. corresponds to a list of shares
-    /// Tertiary: owner of the cluster. corresponds to a list of shares
     shares: ShareMultiIndexMap,
-    /// Metadata for all validators in the network
-    /// Primary: public key of the validator. uniquely identifies the metadata
-    /// Secondary: cluster id. corresponds to list of metadata for all validators
-    /// Tertiary: owner of the cluster: corresponds to list of metadata for all validators
     validator_metadata: MetadataMultiIndexMap,
-    /// All of the clusters in the network
-    /// Primary: cluster id. uniquely identifies a cluster
-    /// Secondary: public key of the validator. uniquely identifies a cluster
-    /// Tertiary: owner of the cluster. uniquely identifies a cluster
     clusters: ClusterMultiIndexMap,
 }

@@ -62,7 +63,7 @@ struct MultiState {
 #[derive(Debug, Default)]
 struct SingleState {
     /// The ID of our own operator. This is determined via events when the operator is
-    /// registered with the network. Therefore, this may not be available right away if the client
+    /// registered with the network. Therefore, this may not be available right away if the operator
     /// is running but has not been registered with the network contract yet.
     id: AtomicU64,
     /// The last block that was processed
@@ -93,7 +94,7 @@ pub struct NetworkDatabase {
 }

 impl NetworkDatabase {
-    /// Construct a new NetworkDatabase at the given path and the Public Key of our operator.
+    /// Construct a new NetworkDatabase at the given path and the Public Key of the current operator
     pub fn new(path: &Path, pubkey: &Rsa) -> Result {
         let conn_pool = Self::open_or_create(path)?;
         let state = NetworkState::new_with_state(&conn_pool, pubkey)?;

diff --git a/anchor/database/src/operator_operations.rs b/anchor/database/src/operator_operations.rs
index 665e1aec..74e47426 100644
--- a/anchor/database/src/operator_operations.rs
+++ b/anchor/database/src/operator_operations.rs
@@ -6,9 +6,9 @@ use std::sync::atomic::Ordering;

 /// Implements all operator related functionality on the database
 impl NetworkDatabase {
-    /// Insert a new operator into the database
+    /// Insert a new Operator into the database
     pub fn insert_operator(&self, operator: &Operator) -> Result<(), DatabaseError> {
-        // make sure that this operator does not already exist
+        // Make sure that this operator does not already exist
         if self.operator_exists(&operator.id) {
             return Err(DatabaseError::NotFound(format!(
                 "Operator with id {} already in database",
@@ -16,7 +16,7 @@ impl NetworkDatabase {
             )));
         }

-        // base64 encode the key for storage
+        // Base64 encode the key for storage
         let pem_key = operator
             .rsa_pubkey
             .public_key_to_pem()
@@ -27,15 +27,15 @@ impl NetworkDatabase {
         let conn = self.connection()?;
         conn.prepare_cached(SQL[&SqlStatement::InsertOperator])?
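(Note: the operator's public key takes the route Rsa<Public> -> PEM bytes via public_key_to_pem() -> base64 string, and that string is what lands in the TEXT column bound below. Reading a stored key back out presumably reverses the chain; a sketch under that assumption, using the openssl and base64 crates already in the workspace, where decode_stored_key and encoded are illustrative names only:

    use base64::prelude::*;
    use openssl::{pkey::Public, rsa::Rsa};

    fn decode_stored_key(encoded: &str) -> Result<Rsa<Public>, Box<dyn std::error::Error>> {
        let pem = BASE64_STANDARD.decode(encoded)?; // base64 text -> PEM bytes
        Ok(Rsa::public_key_from_pem(&pem)?)         // PEM bytes -> Rsa<Public>
    }

PEM is itself base64 with header lines, so the outer encoding mainly guarantees the column holds a single-line ASCII string.)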
            .execute(params![
-                *operator.id, // the id of the registered operator
+                *operator.id, // The id of the registered operator
                 encoded, // RSA public key
-                operator.owner.to_string() // the owner address of the operator
+                operator.owner.to_string() // The owner address of the operator
             ])?;

-        // Check to see if this operator is us and insert it into memory
+        // Check to see if this operator is the current operator
         let own_id = self.state.single_state.id.load(Ordering::Relaxed);
         if own_id == u64::MAX {
-            // if the keys match, this is us so we want to save the id
+            // If the keys match, this is the current operator so we want to save the id
             let keys_match = pem_key == self.pubkey.public_key_to_pem().unwrap_or_default();
             if keys_match {
                 self.state
@@ -44,7 +44,7 @@ impl NetworkDatabase {
                     .store(*operator.id, Ordering::Relaxed);
             }
         }
-        // store the operator
+        // Store the operator in memory
         self.state
             .single_state
             .operators
@@ -54,7 +54,7 @@ impl NetworkDatabase {

     /// Delete an operator
     pub fn delete_operator(&self, id: OperatorId) -> Result<(), DatabaseError> {
-        // make sure that this operator exists
+        // Make sure that this operator exists
         if !self.operator_exists(&id) {
             return Err(DatabaseError::NotFound(format!(
                 "Operator with id {} not in database",

diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs
index ecbceef5..2ff6d3d0 100644
--- a/anchor/database/src/sql_operations.rs
+++ b/anchor/database/src/sql_operations.rs
@@ -18,7 +18,7 @@ pub(crate) enum SqlStatement {
     InsertValidator, // Insert a Validator into the database
     DeleteValidator, // Delete a Validator from the database
-    GetAllValidators, // Get all Validators for state reconstructions
+    GetAllValidators, // Get all Validators for state reconstruction

     InsertShare, // Insert a KeyShare into the database
     GetShares,   // Get the relevant keyshare for a validator
@@ -125,20 +125,5 @@ pub(crate) static SQL: LazyLock> = LazyLock:
         "SELECT block_number FROM block",
     );

-    /*
-    m.insert(
-        SqlStatement::GetValidatorAndShares,
-        "SELECT
-            v.validator_pubkey,
-            v.cluster_id,
-            v.validator_index,
-            v.graffiti,
-            s.share_pubkey,
-            s.encrypted_key,
-            s.operator_id
-        FROM validators v
-        JOIN shares s ON v.validator_pubkey = s.validator_pubkey",
-    );
-    */
     m
 });

diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
index a178258a..34549989 100644
--- a/anchor/database/src/state.rs
+++ b/anchor/database/src/state.rs
@@ -43,14 +43,13 @@ impl NetworkState {
         };

         // First Phase: Fetch data from the database
-        // Two main data structures for state reconstruction
-        // 1) ClusterId -> Cluster
-        // 2) ClusterId -> Vec
-        // 3) ClusterId -> Shares
-        // This simplifies data reconstruction and makes it easy to add more customized stores in the future
+        // 1) OperatorId -> Operator
         let operators = Self::fetch_operators(&conn)?;
+        // 2) ClusterId -> Cluster
         let cluster_map = Self::fetch_clusters(&conn)?;
+        // 3) ClusterId -> Vec
         let validator_map = Self::fetch_validators(&conn)?;
+        // 4) ClusterId -> Vec
         let share_map = Self::fetch_shares(&conn, id)?;

         // Second phase: Populate all in memory stores with data;
@@ -66,6 +65,7 @@ impl NetworkState {

         // Populate all multi-index maps in a single pass through clusters
         for (cluster_id, cluster) in &cluster_map {
+            // Get all the validators for this cluster
             let validators = validator_map
                 .get(cluster_id)
                 .expect("Validator for cluster must exist");
@@ -86,7 +86,7 @@
                 validator.clone(),
             );

-            // Process shares if they exist for this cluster
+            // Process this validator's shares
             if let Some(shares) = share_map.get(cluster_id) {
                 for share in shares {
                     if share.validator_pubkey == validator.public_key {
@@ -141,7 +141,7 @@
         let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllOperators])?;
         let operators = stmt
             .query_map([], |row| {
-                // Transform row into an operator and colleciton into HashMap
+                // Transform row into an operator and collect into HashMap
                 let operator: Operator = row.try_into()?;
                 Ok((operator.id, operator))
             })?
@@ -168,7 +168,7 @@
         Ok(map)
     }

-    // Fetch and transform cluster data for a specific operator
+    // Fetch and transform cluster data from the database
     fn fetch_clusters(conn: &PoolConn) -> Result, DatabaseError> {
         let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllClusters])?;
         let clusters = stmt
@@ -202,7 +202,7 @@
         members.collect()
     }

-    // Fetch the shares that this operators owns
+    // Fetch the shares for a specific operator
     fn fetch_shares(
         conn: &PoolConn,
         id: OperatorId,

diff --git a/anchor/database/src/tests/utils.rs b/anchor/database/src/tests/utils.rs
index 978df93b..a822d9f0 100644
--- a/anchor/database/src/tests/utils.rs
+++ b/anchor/database/src/tests/utils.rs
@@ -25,7 +25,7 @@ pub struct TestFixture {
 }

 impl TestFixture {
-    // Generate a database that is populated with a full cluster. This operator is a prt of the
+    // Generate a database that is populated with a full cluster. This operator is a part of the
     // cluster, so membership data should be saved
     pub fn new() -> Self {
         // generate the operators and pick the first one to be us
@@ -80,7 +80,7 @@
         }
     }

-    // Generate an emtpy database and pick a random public key to be us
+    // Generate an empty database and pick a random public key to be us
     pub fn new_empty() -> Self {
         let temp_dir = TempDir::new().expect("Failed to create temporary directory");
         let db_path = temp_dir.path().join("test.db");

diff --git a/anchor/database/src/validator_operations.rs b/anchor/database/src/validator_operations.rs
index 55d481e0..ee4a8930 100644
--- a/anchor/database/src/validator_operations.rs
+++ b/anchor/database/src/validator_operations.rs
@@ -2,7 +2,7 @@ use crate::{multi_index::UniqueIndex, DatabaseError, NetworkDatabase, SqlStateme
 use rusqlite::params;
 use types::{Address, Graffiti, PublicKey};

-/// Implements all validator related database functionality
+/// Implements all validator specific database functionality
 impl NetworkDatabase {
     /// Update the fee recipient address for all validators in a cluster
     pub fn update_fee_recipient(
         &self,
         owner: Address,
         fee_recipient: Address,
     ) -> Result<(), DatabaseError> {
         // Make sure the cluster exists by getting the in memory entry
         if let Some(mut cluster) = self.clusters().get_by(&owner) {
+            // Update the database
             let conn = self.connection()?;
             conn.prepare_cached(SQL[&SqlStatement::UpdateFeeRecipient])?
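(Note: UpdateFeeRecipient itself lives in sql_operations.rs and is not shown anywhere in this series. Judging by the two values bound below, its entry in the SQL map plausibly looks something like the following, though the table and column names here are a guess, not taken from the patch:

    m.insert(
        SqlStatement::UpdateFeeRecipient,
        "UPDATE clusters SET fee_recipient = ?1 WHERE owner = ?2",
    );

?1 and ?2 bind positionally to fee_recipient.to_string() and owner.to_string() in the params! list.)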
.execute(params![ @@ -19,7 +20,7 @@ impl NetworkDatabase { owner.to_string() // Owner of the cluster ])?; - // Update recipient address on the entry and update it in the map + // Update in memory cluster.fee_recipient = fee_recipient; self.clusters() .update(&cluster.cluster_id, cluster.to_owned()); @@ -43,7 +44,7 @@ impl NetworkDatabase { validator_pubkey.to_string() // The public key of the validator ])?; - // Update the Graffifi field on the entry and update it in the map + // Update in memory validator.graffiti = graffiti; self.metadata().update(validator_pubkey, validator); } From 10f014fc33f8d5356f1a39a4de460747c06e8aaf Mon Sep 17 00:00:00 2001 From: Zacholme7 Date: Mon, 6 Jan 2025 21:20:44 +0000 Subject: [PATCH 47/50] nonce logic --- Cargo.lock | 163 +++++++++++----------- anchor/database/src/cluster_operations.rs | 26 +++- anchor/database/src/lib.rs | 2 + anchor/database/src/sql_operations.rs | 10 ++ anchor/database/src/state.rs | 31 +++- anchor/database/src/table_schema.sql | 5 + 6 files changed, 150 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 19eac3e8..8145fb1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -169,9 +169,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.16" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0540fd0355d400b59633c27bd4b42173e59943f28e9d3376b77a24771d432d04" +checksum = "788bb18e8f61d5d9340b52143f27771daf7e1dccbaf2741621d2493f9debf52e" dependencies = [ "alloy-rlp", "arbitrary", @@ -183,7 +183,6 @@ dependencies = [ "foldhash", "getrandom", "hashbrown 0.15.2", - "hex-literal", "indexmap", "itoa", "k256 0.13.4", @@ -218,7 +217,7 @@ checksum = "5a833d97bf8a5f0f878daf2c8451fff7de7f9de38baa5a45d936ec718d81255a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -493,7 +492,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -505,7 +504,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -557,13 +556,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.84" +version = "0.1.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1244b10dcd56c92219da4e14caa97e312079e185f04ba3eea25061561dc0a0" +checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -598,7 +597,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -896,7 +895,7 @@ dependencies = [ [[package]] name = "bls" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "arbitrary", @@ -1136,7 +1135,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1165,7 +1164,7 @@ dependencies = [ [[package]] name = "clap_utils" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "clap", @@ -1229,7 +1228,7 @@ dependencies = [ [[package]] name = "compare_fields" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "itertools 0.10.5", ] @@ -1246,7 +1245,7 @@ dependencies = [ [[package]] name = "compare_fields_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -1537,7 +1536,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1585,7 +1584,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1607,7 +1606,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1798,7 +1797,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1809,7 +1808,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -1830,7 +1829,7 @@ dependencies = [ "convert_case", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "unicode-xid", ] @@ -1868,7 +1867,7 @@ dependencies = [ [[package]] name = "directory" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "clap", "clap_utils 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2011,7 +2010,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2172,7 +2171,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2301,7 +2300,7 @@ dependencies = [ [[package]] name = "eth2_config" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "paste", "types 0.2.1 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2323,7 +2322,7 @@ dependencies = [ [[package]] name = "eth2_interop_keypairs" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bls 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "ethereum_hashing", @@ -2391,7 +2390,7 @@ dependencies = [ [[package]] name = "eth2_network_config" version = "0.2.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", "discv5 0.9.0", @@ -2558,7 +2557,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -2806,7 +2805,7 @@ dependencies = [ [[package]] name = "fixed_bytes" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "safe_arith 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -2972,7 +2971,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -3110,7 +3109,7 @@ checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -3152,7 +3151,7 @@ dependencies = [ [[package]] name = "gossipsub" version = "0.5.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "async-channel", "asynchronous-codec", @@ -3333,12 +3332,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-literal" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" - [[package]] name = "hex_fmt" version = "0.3.0" @@ -3770,7 +3763,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -3905,7 +3898,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -3949,7 +3942,7 @@ dependencies = [ [[package]] name = "int_to_bytes" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "bytes", ] @@ -4145,7 +4138,7 @@ dependencies = [ [[package]] name = "kzg" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "arbitrary", "c-kzg", @@ -4582,7 +4575,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -4785,7 +4778,7 @@ dependencies = [ [[package]] name = "lighthouse_network" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -4843,7 +4836,7 @@ dependencies = [ [[package]] name = "lighthouse_version" version = "0.1.0" -source = 
"git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "git-version", "target_info", @@ -4922,7 +4915,7 @@ dependencies = [ [[package]] name = "logging" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "chrono", "metrics 0.2.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -4970,7 +4963,7 @@ dependencies = [ [[package]] name = "lru_cache" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "fnv", ] @@ -5046,7 +5039,7 @@ dependencies = [ [[package]] name = "merkle_proof" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -5088,7 +5081,7 @@ dependencies = [ [[package]] name = "metrics" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "prometheus", ] @@ -5537,7 +5530,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -5803,29 +5796,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -5930,7 +5923,7 @@ dependencies = [ [[package]] name = "pretty_reqwest_error" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "reqwest", "sensitive_url 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", @@ -6065,7 +6058,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 
2.0.95", ] [[package]] @@ -6096,7 +6089,7 @@ checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -6858,7 +6851,7 @@ source = "git+https://github.com/agemanning/lighthouse?branch=modularize-vc#75a5 [[package]] name = "safe_arith" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" [[package]] name = "salsa20" @@ -6890,7 +6883,7 @@ dependencies = [ "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7032,7 +7025,7 @@ dependencies = [ [[package]] name = "sensitive_url" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "serde", "url", @@ -7065,7 +7058,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7098,7 +7091,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7654,7 +7647,7 @@ dependencies = [ [[package]] name = "swap_or_not_shuffle" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -7674,9 +7667,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.94" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987bc0be1cdea8b10216bd06e2ca407d40b9543468fafd3ddfb02f36e77f71f3" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -7703,7 +7696,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7783,7 +7776,7 @@ dependencies = [ [[package]] name = "task_executor" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "async-channel", "futures", @@ -7842,7 +7835,7 @@ dependencies = [ [[package]] name = "test_random_derive" version = "0.2.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "quote", "syn 1.0.109", @@ -7874,7 +7867,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -7885,7 +7878,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -8026,7 +8019,7 @@ checksum = 
"693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -8189,7 +8182,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -8270,7 +8263,7 @@ dependencies = [ "darling 0.20.10", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -8357,7 +8350,7 @@ dependencies = [ [[package]] name = "types" version = "0.2.1" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8535,7 +8528,7 @@ dependencies = [ [[package]] name = "unused_port" version = "0.1.0" -source = "git+https://github.com/sigp/lighthouse?branch=unstable#7e0cddef321c2a069582c65b58e5f46590d60c49" +source = "git+https://github.com/sigp/lighthouse?branch=unstable#f51a292f77575a1786af34271fb44954f141c377" dependencies = [ "lru_cache 0.1.0 (git+https://github.com/sigp/lighthouse?branch=unstable)", "parking_lot 0.12.3", @@ -8739,7 +8732,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -8774,7 +8767,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9281,7 +9274,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -9303,7 +9296,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -9323,7 +9316,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", "synstructure", ] @@ -9345,7 +9338,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] @@ -9367,7 +9360,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.94", + "syn 2.0.95", ] [[package]] diff --git a/anchor/database/src/cluster_operations.rs b/anchor/database/src/cluster_operations.rs index addf7965..30919a7c 100644 --- a/anchor/database/src/cluster_operations.rs +++ b/anchor/database/src/cluster_operations.rs @@ -2,7 +2,7 @@ use super::{DatabaseError, NetworkDatabase, NonUniqueIndex, SqlStatement, Unique use rusqlite::params; use ssv_types::{Cluster, ClusterId, Share, ValidatorMetadata}; use std::sync::atomic::Ordering; -use types::PublicKey; +use types::{Address, PublicKey}; /// Implements all cluster related functionality on the database impl NetworkDatabase { @@ -130,4 +130,28 @@ impl NetworkDatabase { Ok(()) } + + /// Bump the nonce of the owner + pub fn bump_nonce(&self, owner: &Address) -> Result<(), DatabaseError> { + // bump the nonce in the db + let conn = self.connection()?; + conn.prepare_cached(SQL[&SqlStatement::BumpNonce])? 
+            .execute(params![owner.to_string()])?;
+
+        // bump the nonce in memory
+        if !self.state.single_state.nonces.contains_key(owner) {
+            // if it does not yet exist in memory, then create an entry and set it to one
+            self.state.single_state.nonces.insert(*owner, 1);
+        } else {
+            // otherwise, just increment the entry
+            let mut entry = self
+                .state
+                .single_state
+                .nonces
+                .get_mut(owner)
+                .expect("This must exist");
+            *entry += 1;
+        }
+        Ok(())
+    }
 }
diff --git a/anchor/database/src/lib.rs b/anchor/database/src/lib.rs
index 3fe928f8..859db7d4 100644
--- a/anchor/database/src/lib.rs
+++ b/anchor/database/src/lib.rs
@@ -72,6 +72,8 @@ struct SingleState {
     operators: DashMap<OperatorId, Operator>,
     /// All of the Clusters that we are a member of
     clusters: DashSet<ClusterId>,
+    /// Nonce of the owner account
+    nonces: DashMap<Address, u16>,
 }
 
 // Container to hold all network state
diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs
index 2ff6d3d0..cb2219f7 100644
--- a/anchor/database/src/sql_operations.rs
+++ b/anchor/database/src/sql_operations.rs
@@ -28,6 +28,9 @@ pub(crate) enum SqlStatement {
 
     UpdateBlockNumber, // Update the last block that the database has processed
     GetBlockNumber,    // Get the last block that the database has processed
+
+    GetAllNonces, // Fetch all the Nonce values for every Owner
+    BumpNonce,    // Bump the nonce value for an Owner
 }
 
 pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock::new(|| {
@@ -125,5 +128,12 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock:
         "SELECT block_number FROM block",
     );
 
+    // Nonce
+    m.insert(SqlStatement::GetAllNonces, "SELECT * FROM nonce");
+    m.insert(
+        SqlStatement::BumpNonce,
+        "UPDATE nonce SET nonce = nonce + 1 WHERE owner = ?1",
+    );
+
     m
 });
diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
index 34549989..323bef4a 100644
--- a/anchor/database/src/state.rs
+++ b/anchor/database/src/state.rs
@@ -7,11 +7,14 @@ use dashmap::{DashMap, DashSet};
 use openssl::pkey::Public;
 use openssl::rsa::Rsa;
 use rusqlite::{params, OptionalExtension};
+use rusqlite::{types::Type, Error as SqlError};
 use ssv_types::{
     Cluster, ClusterId, ClusterMember, Operator, OperatorId, Share, ValidatorMetadata,
 };
 use std::collections::HashMap;
+use std::str::FromStr;
 use std::sync::atomic::{AtomicU64, Ordering};
+use types::Address;
 
 impl NetworkState {
     /// Build the network state from the database data
@@ -51,6 +54,8 @@ impl NetworkState {
         let validator_map = Self::fetch_validators(&conn)?;
         // 4) ClusterId -> Vec<Share>
         let share_map = Self::fetch_shares(&conn, id)?;
+        // 5) Owner -> Nonce (u16)
+        let nonce_map = Self::fetch_nonces(&conn)?;
 
         // Second phase: Populate all in memory stores with data;
         let shares_multi: ShareMultiIndexMap = MultiIndexMap::new();
@@ -61,6 +66,7 @@ impl NetworkState {
             last_processed_block: AtomicU64::new(last_processed_block),
             operators: DashMap::from_iter(operators),
             clusters: DashSet::from_iter(cluster_map.keys().copied()),
+            nonces: DashMap::from_iter(nonce_map),
         };
 
         // Populate all multi-index maps in a single pass through clusters
@@ -221,9 +227,27 @@ impl NetworkState {
         }
         Ok(map)
     }
+
+    // Fetch all of the owner nonce pairs
+    fn fetch_nonces(conn: &PoolConn) -> Result<HashMap<Address, u16>, DatabaseError> {
+        let mut stmt = conn.prepare(SQL[&SqlStatement::GetAllNonces])?;
+        let nonces = stmt
+            .query_map([], |row| {
+                // Get the owner from column 0
+                let owner_str = row.get::<_, String>(0)?;
+                let owner = Address::from_str(&owner_str)
+                    .map_err(|e| SqlError::FromSqlConversionFailure(1, Type::Text, Box::new(e)))?;
+
+                // Get the nonce from column 1
+                let nonce = row.get::<_, u16>(1)?;
+                Ok((owner, nonce))
+            })?
+            .map(|result| result.map_err(DatabaseError::from));
+        nonces.collect()
+    }
 }
 
-// Interface for accessing state data
+// Interface over state data
 impl NetworkDatabase {
     /// Get a reference to the shares map
     pub fn shares(&self) -> &ShareMultiIndexMap {
@@ -272,4 +296,9 @@ impl NetworkDatabase {
             .last_processed_block
             .load(Ordering::Relaxed)
     }
+
+    /// Get the nonce of the owner if it exists
+    pub fn get_nonce(&self, owner: &Address) -> Option<u16> {
+        self.state.single_state.nonces.get(owner).map(|v| *v)
+    }
 }
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index 5ad23650..8ae7e6ff 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -3,6 +3,11 @@ CREATE TABLE block (
 );
 INSERT INTO block (block_number) VALUES (0);
 
+CREATE TABLE nonce (
+    owner TEXT NOT NULL,
+    nonce INTEGER DEFAULT 0
+);
+
 CREATE TABLE operators (
     operator_id INTEGER PRIMARY KEY,
     public_key TEXT NOT NULL,

From 1f246e6dde5195f07775205906f1a2cc86b9771a Mon Sep 17 00:00:00 2001
From: Zacholme7 
Date: Mon, 6 Jan 2025 22:04:28 +0000
Subject: [PATCH 48/50] nonce insertion fix and tests

---
 anchor/database/src/sql_operations.rs    |  3 +-
 anchor/database/src/state.rs             |  9 ++++--
 anchor/database/src/table_schema.sql     |  2 +-
 anchor/database/src/tests/state_tests.rs | 37 ++++++++++++++++++++++++
 4 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/anchor/database/src/sql_operations.rs b/anchor/database/src/sql_operations.rs
index cb2219f7..fb182294 100644
--- a/anchor/database/src/sql_operations.rs
+++ b/anchor/database/src/sql_operations.rs
@@ -132,7 +132,8 @@ pub(crate) static SQL: LazyLock<HashMap<SqlStatement, &'static str>> = LazyLock:
     m.insert(SqlStatement::GetAllNonces, "SELECT * FROM nonce");
     m.insert(
         SqlStatement::BumpNonce,
-        "UPDATE nonce SET nonce = nonce + 1 WHERE owner = ?1",
+        "INSERT INTO nonce (owner, nonce) VALUES (?1, 1)
+        ON CONFLICT (owner) DO UPDATE SET nonce = nonce + 1",
     );
 
     m
diff --git a/anchor/database/src/state.rs b/anchor/database/src/state.rs
index 323bef4a..1bf491b7 100644
--- a/anchor/database/src/state.rs
+++ b/anchor/database/src/state.rs
@@ -298,7 +298,12 @@ impl NetworkDatabase {
     }
 
     /// Get the nonce of the owner if it exists
-    pub fn get_nonce(&self, owner: &Address) -> Option<u16> {
-        self.state.single_state.nonces.get(owner).map(|v| *v)
+    pub fn get_nonce(&self, owner: &Address) -> u16 {
+        self.state
+            .single_state
+            .nonces
+            .get(owner)
+            .map(|v| *v)
+            .unwrap_or(0)
     }
 }
diff --git a/anchor/database/src/table_schema.sql b/anchor/database/src/table_schema.sql
index 8ae7e6ff..ae682429 100644
--- a/anchor/database/src/table_schema.sql
+++ b/anchor/database/src/table_schema.sql
@@ -4,7 +4,7 @@ CREATE TABLE block (
 INSERT INTO block (block_number) VALUES (0);
 
 CREATE TABLE nonce (
-    owner TEXT NOT NULL,
+    owner TEXT NOT NULL PRIMARY KEY,
     nonce INTEGER DEFAULT 0
 );
 
diff --git a/anchor/database/src/tests/state_tests.rs b/anchor/database/src/tests/state_tests.rs
index 34652671..9e3b6a96 100644
--- a/anchor/database/src/tests/state_tests.rs
+++ b/anchor/database/src/tests/state_tests.rs
@@ -118,4 +118,41 @@ mod state_database_tests {
             .expect("Failed to create database");
         assert_eq!(fixture.db.get_last_processed_block(), 10);
     }
+
+    #[test]
+    // Test to make sure we can retrieve and increment a nonce
+    fn test_retrieve_increment_nonce() {
+        let fixture = TestFixture::new();
+        let owner = Address::random();
+
+        // this is the first time getting the nonce, so it should be zero
+        let nonce = fixture.db.get_nonce(&owner);
+        assert_eq!(nonce, 0);
+
+        // increment the nonce and then confirm that it is one
+        fixture
+            .db
+            .bump_nonce(&owner)
+            .expect("Failed to increment nonce");
+        let nonce = fixture.db.get_nonce(&owner);
+        assert_eq!(nonce, 1);
+    }
+
+    #[test]
+    // Test to make sure a nonce persists after a restart
+    fn test_nonce_after_restart() {
+        let mut fixture = TestFixture::new();
+        let owner = Address::random();
+        fixture
+            .db
+            .bump_nonce(&owner)
+            .expect("Failed to increment nonce");
+
+        drop(fixture.db);
+        fixture.db = NetworkDatabase::new(&fixture.path, &fixture.pubkey)
+            .expect("Failed to create database");
+
+        // confirm that nonce is 1
+        assert_eq!(fixture.db.get_nonce(&owner), 1);
+    }
 }

From 661c1485d9b00a6d7c6fa67abc6f7b7bcb356ecd Mon Sep 17 00:00:00 2001
From: Zac Holme <79027434+Zacholme7@users.noreply.github.com>
Date: Tue, 7 Jan 2025 17:08:18 -0600
Subject: [PATCH 49/50] spelling

---
 anchor/database/src/multi_index.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/anchor/database/src/multi_index.rs b/anchor/database/src/multi_index.rs
index a8794566..92cf3ec5 100644
--- a/anchor/database/src/multi_index.rs
+++ b/anchor/database/src/multi_index.rs
@@ -49,7 +49,7 @@ where
 /// A concurrent multi-index map that supports up to three different access patterns.
 /// The core differentiates between unique identification and non unique identification. The primary
 /// index is forced to always uniquely identify the value. The secondary and tertiary indicies have
-/// more flexibility. They key may non uniquely identify many different values, or uniquely identify
+/// more flexibility. The key may non uniquely identify many different values, or uniquely identify
 /// a single value
 ///
 /// Example: A share is uniquely identified by the Validators public key that it belongs too. A

From 5a5c7a13d3c5770ef91f8d1e5fca885207e3f1f8 Mon Sep 17 00:00:00 2001
From: Zacholme7 
Date: Wed, 8 Jan 2025 21:52:50 +0000
Subject: [PATCH 50/50] initial README draft

---
 anchor/database/README.md | 64 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 64 insertions(+)
 create mode 100644 anchor/database/README.md

diff --git a/anchor/database/README.md b/anchor/database/README.md
new file mode 100644
index 00000000..6f7908c8
--- /dev/null
+++ b/anchor/database/README.md
@@ -0,0 +1,64 @@
+# Anchor Database
+
+The Anchor Database provides a robust persistent and in-memory caching layer for the Anchor project, specifically designed to handle SSV Network data efficiently. This crate manages both persistent storage of blockchain event data and high-performance in-memory access patterns.
+
+## Table of Contents
+
+1. [Overview](#overview)
+2. [Core Features](#core-features)
+3. [Architecture](#architecture)
+4. [Data Models](#data-models)
+
+## Overview
+
+The Anchor Database serves as the backbone for storing and accessing SSV Network event data. When an Anchor node starts up, it needs to process and store blockchain event logs to maintain state.
+
+## Core Features
+
+* **Persistent Storage**: SQLite-based store with automatic schema management
+* **In-Memory Caching**: Efficient caching of frequently accessed data
+* **Multi-Index Access**: Flexible data access patterns through multiple different keys
+* **Automatic State Recovery**: Rebuilds in-memory state from persistent storage on startup
+* **Thread Safety**: Concurrent access support through DashMap implementations
+
+## Architecture
+
+The database architecture consists of two key layers.
+
+### Storage Layer
+
+At the foundation lies a SQLite database that provides persistent storage. This layer encompasses:
+
+* **Database Connection Management**: A connection pool that maintains and reuses SQLite connections efficiently, preventing resource exhaustion while ensuring consistent access
+* **Schema and Transaction Management**: Automatic table creation and transaction support for data integrity
+
+### Cache Layer
+
+The in-memory cache layer combines high-performance caching with sophisticated indexing through a unified system. It is broken up into Single State and Multi State.
+
+* **Single State**: Single State handles straightforward, one-to-one relationships where data only needs one access pattern. This is ideal for data that is frequently accessed but has simple relationships.
+* **Multi State**: Multi State handles complex relationships where the same data needs to be accessed through different keys. This is implemented through a series of MultiIndexMaps, each supporting three different access patterns for the same data. The type system enforces correct usage through the UniqueTag and NonUniqueTag markers, preventing incorrect access patterns at compile time. Each MultiIndexMap in the Multi State provides three ways to access its data (see the conceptual sketch at the end of this README):
+  1) A primary key that uniquely identifies each piece of data
+  2) A secondary key that can either uniquely identify data or map to multiple items
+  3) A tertiary key that can also be unique or map to multiple items
+
+## Data Models
+
+The database handles several core data types:
+
+**Operator**
+* Represents a network operator
+* Identified by OperatorId
+* Contains RSA public key and owner address
+
+**Cluster**
+* Represents a group of Operators managing validators
+* Contains cluster membership information
+* Tracks operational status and fault counts
+
+**Validator**
+* Contains validator metadata
+* Links to cluster membership
+* Stores configuration data
+
+**Share**
+* Represents cryptographic shares for validators
+* Links operators to validators
+* Contains encrypted key data
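+
+## Example: Multi-Index Access (Concept)
+
+To make the Multi State design concrete, here is a deliberately simplified, self-contained
+sketch of the multi-index idea: one copy of the data reachable through a unique primary key
+and a non-unique secondary key. This is an illustration only; the type and method names below
+are hypothetical and do not match the crate's actual `MultiIndexMap` API.
+
+```rust
+use std::collections::HashMap;
+use std::hash::Hash;
+
+/// Conceptual two-index map: values live in `primary`; `secondary` only stores
+/// primary keys, so each value is stored exactly once.
+struct TwoIndexMap<P, S, V> {
+    primary: HashMap<P, V>,        // unique: one key maps to one value
+    secondary: HashMap<S, Vec<P>>, // non-unique: one key maps to many primaries
+}
+
+impl<P: Clone + Eq + Hash, S: Eq + Hash, V> TwoIndexMap<P, S, V> {
+    fn new() -> Self {
+        Self { primary: HashMap::new(), secondary: HashMap::new() }
+    }
+
+    /// Insert a value under both keys, e.g. a Share keyed by validator public
+    /// key (unique) and by ClusterId (non-unique).
+    fn insert(&mut self, pk: P, sk: S, value: V) {
+        self.secondary.entry(sk).or_default().push(pk.clone());
+        self.primary.insert(pk, value);
+    }
+
+    fn get_by_primary(&self, pk: &P) -> Option<&V> {
+        self.primary.get(pk)
+    }
+
+    fn get_all_by_secondary(&self, sk: &S) -> Vec<&V> {
+        self.secondary
+            .get(sk)
+            .into_iter()
+            .flatten()
+            .filter_map(|pk| self.primary.get(pk))
+            .collect()
+    }
+}
+```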
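+
+## Example: Owner Nonce Tracking
+
+The sketch below mirrors the crate's nonce tests (`src/tests/state_tests.rs`): it opens a
+database, bumps an owner's nonce, and shows that the value survives a restart. The
+`NetworkDatabase::new(path, pubkey)` signature, the `openssl` key type, and the crate
+import path are assumed from the current source and tests; treat this as a minimal
+sketch rather than a stable API reference.
+
+```rust
+use std::path::Path;
+
+use database::{DatabaseError, NetworkDatabase}; // crate name assumed
+use openssl::{pkey::Public, rsa::Rsa};
+use types::Address;
+
+/// Minimal sketch, assuming the constructor and nonce API shown in the tests.
+fn nonce_roundtrip(path: &Path, pubkey: &Rsa<Public>) -> Result<(), DatabaseError> {
+    let db = NetworkDatabase::new(path, pubkey)?;
+    let owner = Address::random();
+
+    // A previously unseen owner reads as nonce 0 (`get_nonce` falls back to 0).
+    assert_eq!(db.get_nonce(&owner), 0);
+
+    // `bump_nonce` upserts: the first call inserts the row with nonce = 1; later
+    // calls increment both the SQLite row and the in-memory DashMap entry.
+    db.bump_nonce(&owner)?;
+    assert_eq!(db.get_nonce(&owner), 1);
+
+    // Reopening the database rebuilds the in-memory nonce map from the `nonce`
+    // table, so the value persists across restarts.
+    drop(db);
+    let db = NetworkDatabase::new(path, pubkey)?;
+    assert_eq!(db.get_nonce(&owner), 1);
+    Ok(())
+}
+```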