From a14c865c909cf28f90d97580cf151280731e85ef Mon Sep 17 00:00:00 2001
From: morph <82043364+morph-dev@users.noreply.github.com>
Date: Mon, 19 Aug 2024 23:52:41 +0300
Subject: [PATCH 1/3] feat: use weight for setting storage capacity per network

---
 src/lib.rs                     | 15 ++++---
 trin-history/src/storage.rs    | 24 +++--------
 trin-storage/src/config.rs     | 79 ++++++++++++++++++++++++++++++++++
 trin-storage/src/lib.rs        | 35 ++---------
 trin-storage/src/test_utils.rs | 13 ++++--
 5 files changed, 106 insertions(+), 60 deletions(-)
 create mode 100644 trin-storage/src/config.rs

diff --git a/src/lib.rs b/src/lib.rs
index 063819722..91af36374 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -22,7 +22,7 @@ use tree_hash::TreeHash;
 use trin_beacon::initialize_beacon_network;
 use trin_history::initialize_history_network;
 use trin_state::initialize_state_network;
-use trin_storage::PortalStorageConfig;
+use trin_storage::PortalStorageConfigFactory;
 use trin_utils::version::get_trin_version;
 use trin_validation::oracle::HeaderOracle;
 use utp_rs::socket::UtpSocket;
@@ -93,10 +93,11 @@ pub async fn run_trin(
     let utp_socket = UtpSocket::with_socket(discv5_utp_socket);
     let utp_socket = Arc::new(utp_socket);
 
-    let storage_config = PortalStorageConfig::new(
-        trin_config.mb.into(),
-        node_data_dir,
+    let storage_config_factory = PortalStorageConfigFactory::new(
+        trin_config.mb as u64,
+        &trin_config.portal_subnetworks,
         discovery.local_enr().node_id(),
+        node_data_dir,
     )?;
 
     // Initialize state sub-network service and event handlers, if selected
@@ -109,7 +110,7 @@
             &discovery,
             utp_socket.clone(),
             portalnet_config.clone(),
-            storage_config.clone(),
+            storage_config_factory.create(STATE_NETWORK),
             header_oracle.clone(),
         )
         .await?
@@ -132,7 +133,7 @@
             &discovery,
             utp_socket.clone(),
             portalnet_config.clone(),
-            storage_config.clone(),
+            storage_config_factory.create(BEACON_NETWORK),
             header_oracle.clone(),
         )
         .await?
@@ -155,7 +156,7 @@
             &discovery,
             utp_socket.clone(),
             portalnet_config.clone(),
-            storage_config.clone(),
+            storage_config_factory.create(HISTORY_NETWORK),
             header_oracle.clone(),
         )
         .await?
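
The factory introduced by this patch (trin-storage/src/config.rs below) splits the node's total `--mb` capacity across the enabled subnetworks by relative weight. A minimal standalone sketch of that calculation, using the weights this patch defines (history = 1, state = 100, beacon = 0); the helper function and the example numbers are illustrative, not part of the patch:

    // Illustrative sketch of PortalStorageConfigFactory::create()'s capacity math in patch 1 (MB units).
    fn subnetwork_capacity_mb(total_mb: u64, weight: u64, total_weight: u64) -> u64 {
        if total_weight == 0 {
            0
        } else {
            total_mb * weight / total_weight
        }
    }

    // e.g. `--mb 1000` with history, state and beacon enabled: total weight = 1 + 100 + 0 = 101,
    // so history gets 1000 * 1 / 101 = 9 MB, state gets 1000 * 100 / 101 = 990 MB, beacon gets 0.
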
diff --git a/trin-history/src/storage.rs b/trin-history/src/storage.rs
index 7448a5f82..2ac3a06c2 100644
--- a/trin-history/src/storage.rs
+++ b/trin-history/src/storage.rs
@@ -80,34 +80,22 @@ impl HistoryStorage {
 #[cfg(test)]
 #[allow(clippy::unwrap_used)]
 pub mod test {
-    use std::path::PathBuf;
-
-    use discv5::enr::{CombinedKey, Enr as Discv5Enr, NodeId};
     use ethportal_api::{BlockHeaderKey, HistoryContentKey};
-    use portalnet::utils::db::{configure_node_data_dir, setup_temp_dir};
     use quickcheck::{QuickCheck, TestResult};
     use rand::RngCore;
     use serial_test::serial;
+    use trin_storage::test_utils::create_test_portal_storage_config_with_capacity;
 
     use super::*;
 
     const CAPACITY_MB: u64 = 2;
 
-    fn get_active_node_id(temp_dir: PathBuf) -> NodeId {
-        let (_, mut pk) = configure_node_data_dir(temp_dir, None, "test".to_string()).unwrap();
-        let pk = CombinedKey::secp256k1_from_bytes(pk.0.as_mut_slice()).unwrap();
-        Discv5Enr::empty(&pk).unwrap().node_id()
-    }
-
    #[test_log::test(tokio::test)]
    #[serial]
    async fn test_store() {
        fn test_store_random_bytes() -> TestResult {
-            let temp_dir = setup_temp_dir().unwrap();
-            let node_id = get_active_node_id(temp_dir.path().to_path_buf());
-            let storage_config =
-                PortalStorageConfig::new(CAPACITY_MB, temp_dir.path().to_path_buf(), node_id)
-                    .unwrap();
+            let (temp_dir, storage_config) =
+                create_test_portal_storage_config_with_capacity(CAPACITY_MB).unwrap();
             let mut storage = HistoryStorage::new(storage_config).unwrap();
             let content_key = HistoryContentKey::random().unwrap();
             let mut value = [0u8; 32];
@@ -127,10 +115,8 @@ pub mod test {
    #[test_log::test(tokio::test)]
    #[serial]
    async fn test_get_data() -> Result<(), ContentStoreError> {
-        let temp_dir = setup_temp_dir().unwrap();
-        let node_id = get_active_node_id(temp_dir.path().to_path_buf());
-        let storage_config =
-            PortalStorageConfig::new(CAPACITY_MB, temp_dir.path().to_path_buf(), node_id).unwrap();
+        let (temp_dir, storage_config) =
+            create_test_portal_storage_config_with_capacity(CAPACITY_MB).unwrap();
         let mut storage = HistoryStorage::new(storage_config)?;
         let content_key = HistoryContentKey::BlockHeaderWithProof(BlockHeaderKey::default());
         let value: Vec<u8> = "OGFWs179fWnqmjvHQFGHszXloc3Wzdb4".into();
diff --git a/trin-storage/src/config.rs b/trin-storage/src/config.rs
new file mode 100644
index 000000000..27bf133ae
--- /dev/null
+++ b/trin-storage/src/config.rs
@@ -0,0 +1,79 @@
+use std::path::PathBuf;
+
+use discv5::enr::NodeId;
+use ethportal_api::types::cli::{BEACON_NETWORK, HISTORY_NETWORK, STATE_NETWORK};
+use r2d2::Pool;
+use r2d2_sqlite::SqliteConnectionManager;
+
+use crate::{error::ContentStoreError, utils::setup_sql, DistanceFunction};
+
+/// Factory for creating [PortalStorageConfig] instances
+pub struct PortalStorageConfigFactory {
+    node_id: NodeId,
+    node_data_dir: PathBuf,
+    total_capacity_mb: u64,
+    total_capacity_weight: u64,
+    sql_connection_pool: Pool<SqliteConnectionManager>,
+}
+
+impl PortalStorageConfigFactory {
+    const HISTORY_CAPACITY_WEIGHT: u64 = 1;
+    const STATE_CAPACITY_WEIGHT: u64 = 100;
+    const BEACON_CAPACITY_WEIGHT: u64 = 0; // Beacon doesn't care about given capacity
+
+    pub fn new(
+        total_capacity_mb: u64,
+        subnetworks: &[String],
+        node_id: NodeId,
+        node_data_dir: PathBuf,
+    ) -> Result<Self, ContentStoreError> {
+        let total_capacity_weight = subnetworks
+            .iter()
+            .map(|subnetwork| Self::get_capacity_weight(subnetwork))
+            .sum();
+
+        let sql_connection_pool = setup_sql(&node_data_dir)?;
+
+        Ok(Self {
+            node_data_dir,
+            node_id,
+            total_capacity_mb,
+            total_capacity_weight,
+            sql_connection_pool,
+        })
+    }
+
+    pub fn create(&self, subnetwork: &str) -> PortalStorageConfig {
+        let capacity_weight = Self::get_capacity_weight(subnetwork);
+        let capacity_mb = if self.total_capacity_weight == 0 {
+            0
+        } else {
+            self.total_capacity_mb * capacity_weight / self.total_capacity_weight
+        };
+        PortalStorageConfig {
+            storage_capacity_mb: capacity_mb,
+            node_id: self.node_id,
+            node_data_dir: self.node_data_dir.clone(),
+            distance_fn: DistanceFunction::Xor,
+            sql_connection_pool: self.sql_connection_pool.clone(),
+        }
+    }
+
+    fn get_capacity_weight(subnetwork: &str) -> u64 {
+        match subnetwork {
+            HISTORY_NETWORK => Self::HISTORY_CAPACITY_WEIGHT,
+            STATE_NETWORK => Self::STATE_CAPACITY_WEIGHT,
+            BEACON_NETWORK => Self::BEACON_CAPACITY_WEIGHT,
+            _ => panic!("Invalid subnetwork: {subnetwork}"),
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct PortalStorageConfig {
+    pub storage_capacity_mb: u64,
+    pub node_id: NodeId,
+    pub node_data_dir: PathBuf,
+    pub distance_fn: DistanceFunction,
+    pub sql_connection_pool: Pool<SqliteConnectionManager>,
+}
diff --git a/trin-storage/src/lib.rs b/trin-storage/src/lib.rs
index 736daffe4..5163910d3 100644
--- a/trin-storage/src/lib.rs
+++ b/trin-storage/src/lib.rs
@@ -1,10 +1,10 @@
+pub mod config;
 pub mod error;
 pub mod sql;
 pub mod test_utils;
 pub mod utils;
 pub mod versioned;
 
-use crate::utils::setup_sql;
 use alloy_primitives::B256;
 use discv5::enr::NodeId;
 use error::ContentStoreError;
@@ -12,10 +12,10 @@ use ethportal_api::types::{
     content_key::overlay::{IdentityContentKey, OverlayContentKey},
     distance::{Distance, Metric, XorMetric},
 };
-use r2d2::Pool;
-use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::types::{FromSql, FromSqlError, ValueRef};
-use std::{ops::Deref, path::PathBuf, str::FromStr};
+use std::{ops::Deref, str::FromStr};
+
+pub use config::{PortalStorageConfig, PortalStorageConfigFactory};
 
 pub const DATABASE_NAME: &str = "trin.sqlite";
 pub const BYTES_IN_MB_U64: u64 = 1000 * 1000;
@@ -153,33 +153,6 @@ impl ContentStore for MemoryContentStore {
     }
 }
 
-/// Struct for configuring a `PortalStorage` instance.
-#[derive(Clone)]
-pub struct PortalStorageConfig {
-    pub storage_capacity_mb: u64,
-    pub node_id: NodeId,
-    pub node_data_dir: PathBuf,
-    pub distance_fn: DistanceFunction,
-    pub sql_connection_pool: Pool<SqliteConnectionManager>,
-}
-
-impl PortalStorageConfig {
-    pub fn new(
-        storage_capacity_mb: u64,
-        node_data_dir: PathBuf,
-        node_id: NodeId,
-    ) -> Result<Self, ContentStoreError> {
-        let sql_connection_pool = setup_sql(&node_data_dir)?;
-        Ok(Self {
-            storage_capacity_mb,
-            node_id,
-            node_data_dir,
-            distance_fn: DistanceFunction::Xor,
-            sql_connection_pool,
-        })
-    }
-}
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub struct ContentId(B256);
 
diff --git a/trin-storage/src/test_utils.rs b/trin-storage/src/test_utils.rs
index 6ecf7ff41..720f7e971 100644
--- a/trin-storage/src/test_utils.rs
+++ b/trin-storage/src/test_utils.rs
@@ -1,15 +1,22 @@
 use discv5::enr::NodeId;
+use ethportal_api::types::cli::HISTORY_NETWORK;
 use tempfile::TempDir;
 
-use crate::{error::ContentStoreError, PortalStorageConfig};
+use crate::{error::ContentStoreError, PortalStorageConfig, PortalStorageConfigFactory};
 
 /// Creates temporary directory and PortalStorageConfig.
pub fn create_test_portal_storage_config_with_capacity( capacity_mb: u64, ) -> Result<(TempDir, PortalStorageConfig), ContentStoreError> { let temp_dir = TempDir::new()?; - let config = - PortalStorageConfig::new(capacity_mb, temp_dir.path().to_path_buf(), NodeId::random())?; + let config = PortalStorageConfigFactory::new( + capacity_mb, + &[HISTORY_NETWORK.to_string()], + NodeId::random(), + temp_dir.path().to_path_buf(), + ) + .unwrap() + .create(HISTORY_NETWORK); Ok((temp_dir, config)) } From 95ef82da7db8488335c38ed256519c14f2814c3a Mon Sep 17 00:00:00 2001 From: morph <82043364+morph-dev@users.noreply.github.com> Date: Tue, 20 Aug 2024 15:28:55 +0300 Subject: [PATCH 2/3] fix: refactor peertests so they allow custom subnetworks --- ethportal-peertest/src/lib.rs | 107 ++++++-------- ethportal-peertest/src/scenarios/basic.rs | 8 +- tests/self_peertest.rs | 133 +++++++++++++----- trin-beacon/src/storage.rs | 9 +- trin-storage/src/config.rs | 12 +- trin-storage/src/lib.rs | 1 - .../src/versioned/id_indexed_v1/config.rs | 4 +- 7 files changed, 153 insertions(+), 121 deletions(-) diff --git a/ethportal-peertest/src/lib.rs b/ethportal-peertest/src/lib.rs index 0b9baa34e..a0e28594f 100644 --- a/ethportal-peertest/src/lib.rs +++ b/ethportal-peertest/src/lib.rs @@ -3,11 +3,7 @@ pub mod scenarios; pub mod utils; -use std::{ - net::{IpAddr, Ipv4Addr}, - path::PathBuf, - thread, time, -}; +use std::{net::Ipv4Addr, path::PathBuf, thread, time}; use ethportal_api::{ types::{ @@ -59,75 +55,62 @@ async fn launch_node(trin_config: TrinConfig) -> anyhow::Result { }) } -fn generate_trin_config(id: u16, bootnode_enr: Option<&Enr>) -> TrinConfig { - let discovery_port: u16 = DEFAULT_DISCOVERY_PORT + id; - let discovery_port: String = discovery_port.to_string(); +fn generate_trin_config( + id: u16, + network: &str, + subnetworks: &str, + bootnode_enr: Option<&Enr>, +) -> TrinConfig { + let bootnodes_arg = bootnode_enr + .map(|enr| enr.to_base64()) + .unwrap_or("none".to_string()); + + let ip_addr = bootnode_enr + .map(|enr| enr.ip4().expect("bootnode must have IP")) + .unwrap_or(Ipv4Addr::new(127, 0, 0, 1)); + let discovery_port = (DEFAULT_DISCOVERY_PORT + id).to_string(); + let external_addr = format!("{ip_addr}:{discovery_port}"); + let web3_ipc_path = PathBuf::from(format!("/tmp/ethportal-peertest-buddy-{id}.ipc")); + let web3_ipc_path_str = web3_ipc_path + .to_str() + .expect("web3_ipc_path should be unicode"); + // This specific private key scheme is chosen to enforce that the first peer node will be in // the 256 kbucket of the bootnode, to ensure consistent `FindNodes` tests. 
let mut private_key = vec![id as u8; 3]; private_key.append(&mut vec![0u8; 29]); let private_key = hex_encode(private_key); - match bootnode_enr { - Some(enr) => { - let external_addr = format!( - "{}:{}", - enr.ip4().expect("bootnode must have IP"), - discovery_port - ); - let enr_base64 = enr.to_base64(); - let web3_ipc_path_str = web3_ipc_path.as_path().display().to_string(); - let trin_config_args = vec![ - "trin", - "--portal-subnetworks", - "history,beacon,state", - "--external-address", - external_addr.as_str(), - "--bootnodes", - enr_base64.as_str(), - "--discovery-port", - discovery_port.as_str(), - "--web3-ipc-path", - &web3_ipc_path_str[..], - "--unsafe-private-key", - private_key.as_str(), - "--ephemeral", - ]; - TrinConfig::new_from(trin_config_args.iter()).unwrap() - } - None => { - let ip_addr = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - let external_addr = format!("{ip_addr}:{discovery_port}"); - let web3_ipc_path_str = web3_ipc_path.as_path().display().to_string(); - let trin_config_args = vec![ - "trin", - "--portal-subnetworks", - "history,beacon,state", - "--external-address", - external_addr.as_str(), - "--bootnodes", - "none", - "--discovery-port", - discovery_port.as_str(), - "--web3-ipc-path", - &web3_ipc_path_str[..], - "--unsafe-private-key", - private_key.as_str(), - "--ephemeral", - ]; - TrinConfig::new_from(trin_config_args.iter()).unwrap() - } - } + + let trin_config_args = vec![ + "trin", + "--network", + network, + "--portal-subnetworks", + subnetworks, + "--bootnodes", + bootnodes_arg.as_str(), + "--discovery-port", + discovery_port.as_str(), + "--external-address", + external_addr.as_str(), + "--web3-ipc-path", + web3_ipc_path_str, + "--unsafe-private-key", + private_key.as_str(), + "--ephemeral", + ]; + TrinConfig::new_from(trin_config_args.iter()).unwrap() } -pub async fn launch_peertest_nodes(count: u16) -> Peertest { +pub async fn launch_peertest_nodes(count: u16, network: &str, subnetworks: &str) -> Peertest { // Bootnode uses a peertest id of 1 - let bootnode_config = generate_trin_config(1, None); + let bootnode_config = generate_trin_config(1, network, subnetworks, None); let bootnode = launch_node(bootnode_config).await.unwrap(); let bootnode_enr = &bootnode.enr; // All other peertest node ids begin at 2, and increment from there - let nodes = future::try_join_all((2..count + 1).map(|id| { - let node_config = generate_trin_config(id, Some(bootnode_enr)); + let nodes = future::try_join_all((2..=count).map(|id| { + let node_config = generate_trin_config(id, network, subnetworks, Some(bootnode_enr)); launch_node(node_config) })) .await diff --git a/ethportal-peertest/src/scenarios/basic.rs b/ethportal-peertest/src/scenarios/basic.rs index bb6c8c6ee..6ecc4549e 100644 --- a/ethportal-peertest/src/scenarios/basic.rs +++ b/ethportal-peertest/src/scenarios/basic.rs @@ -1,4 +1,4 @@ -use crate::{utils::fixture_header_with_proof, Peertest}; +use crate::{utils::fixture_header_with_proof, Peertest, PeertestNode}; use alloy_primitives::{B256, U256}; use ethportal_api::{ types::{distance::Distance, portal_wire::ProtocolId}, @@ -133,10 +133,10 @@ pub async fn test_ping(protocol: ProtocolId, target: &Client, peertest: &Peertes assert_eq!(result.enr_seq, 1); } -pub async fn test_ping_cross_network(target: &Client, peertest: &Peertest) { +pub async fn test_ping_cross_network(mainnet_target: &Client, angelfood_node: &PeertestNode) { info!("Testing ping for history cross mainnet and angelfood discv5 protocol id"); - let bootnode_enr = peertest.bootnode.enr.clone(); - 
if let Ok(pong) = HistoryNetworkApiClient::ping(target, bootnode_enr).await { + let bootnode_enr = angelfood_node.enr.clone(); + if let Ok(pong) = HistoryNetworkApiClient::ping(mainnet_target, bootnode_enr).await { panic!("Expected ping to fail as mainnet/angelfood history nodes shouldn't be able to communicate {pong:?}"); }; } diff --git a/tests/self_peertest.rs b/tests/self_peertest.rs index 9f0d1eafc..d680f38c2 100644 --- a/tests/self_peertest.rs +++ b/tests/self_peertest.rs @@ -6,7 +6,10 @@ use std::{ }; use ethportal_api::types::{ - cli::{TrinConfig, DEFAULT_WEB3_HTTP_ADDRESS, DEFAULT_WEB3_IPC_PATH}, + cli::{ + TrinConfig, BEACON_NETWORK, DEFAULT_WEB3_HTTP_ADDRESS, DEFAULT_WEB3_IPC_PATH, + HISTORY_NETWORK, STATE_NETWORK, + }, portal_wire::ProtocolId, }; use ethportal_peertest as peertest; @@ -26,7 +29,8 @@ async fn peertest_stateless() { // without needing to reset the database between tests. // If a scenario is testing the state of the content database, // it should be added to its own test function. - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = + setup_peertest("mainnet", &[HISTORY_NETWORK, BEACON_NETWORK, STATE_NETWORK]).await; peertest::scenarios::paginate::test_paginate_local_storage(&peertest).await; peertest::scenarios::basic::test_web3_client_version(&target).await; @@ -59,7 +63,7 @@ async fn peertest_stateless() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_populated_offer() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_populated_offer(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -68,7 +72,7 @@ async fn peertest_populated_offer() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_unpopulated_offer() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_unpopulated_offer(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -77,7 +81,7 @@ async fn peertest_unpopulated_offer() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_unpopulated_offer_fails_with_missing_content() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_unpopulated_offer_fails_with_missing_content( &peertest, &target, ) @@ -89,7 +93,7 @@ async fn peertest_unpopulated_offer_fails_with_missing_content() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_gossip_with_trace() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::gossip::test_gossip_with_trace(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -98,7 +102,7 @@ async fn peertest_gossip_with_trace() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_find_content_return_enr() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::find::test_find_content_return_enr(&target, &peertest).await; peertest.exit_all_nodes(); 
handle.stop().unwrap(); @@ -107,7 +111,7 @@ async fn peertest_find_content_return_enr() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_trace_recursive_find_content_local_db() { - let (peertest, _target, handle) = setup_peertest("mainnet").await; + let (peertest, _target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::find::test_trace_recursive_find_content_local_db(&peertest).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -116,7 +120,7 @@ async fn peertest_trace_recursive_find_content_local_db() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_trace_recursive_find_content_for_absent_content() { - let (peertest, _target, handle) = setup_peertest("mainnet").await; + let (peertest, _target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::find::test_trace_recursive_find_content_for_absent_content(&peertest) .await; peertest.exit_all_nodes(); @@ -126,7 +130,7 @@ async fn peertest_trace_recursive_find_content_for_absent_content() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_trace_recursive_find_content() { - let (peertest, _target, handle) = setup_peertest("mainnet").await; + let (peertest, _target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::find::test_trace_recursive_find_content(&peertest).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -135,7 +139,7 @@ async fn peertest_trace_recursive_find_content() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_validate_pre_merge_header_with_proof() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::validation::test_validate_pre_merge_header_with_proof(&peertest, &target) .await; peertest.exit_all_nodes(); @@ -145,7 +149,7 @@ async fn peertest_validate_pre_merge_header_with_proof() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_invalidate_header_by_hash() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::validation::test_invalidate_header_by_hash(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -154,7 +158,7 @@ async fn peertest_invalidate_header_by_hash() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_validate_block_body() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::validation::test_validate_pre_merge_block_body(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -163,7 +167,7 @@ async fn peertest_validate_block_body() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_validate_receipts() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::validation::test_validate_pre_merge_receipts(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -172,7 +176,7 @@ async fn peertest_validate_receipts() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_recursive_utp() { - let (peertest, _target, handle) = setup_peertest("mainnet").await; + let (peertest, 
_target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::utp::test_recursive_utp(&peertest).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -181,7 +185,7 @@ async fn peertest_recursive_utp() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_trace_recursive_utp() { - let (peertest, _target, handle) = setup_peertest("mainnet").await; + let (peertest, _target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::utp::test_trace_recursive_utp(&peertest).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -190,7 +194,8 @@ async fn peertest_trace_recursive_utp() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_state_offer_account_trie_node() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = + setup_peertest("mainnet", &[HISTORY_NETWORK, STATE_NETWORK]).await; peertest::scenarios::state::test_state_offer_account_trie_node(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -199,7 +204,8 @@ async fn peertest_state_offer_account_trie_node() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_state_offer_contract_storage_trie_node() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = + setup_peertest("mainnet", &[HISTORY_NETWORK, STATE_NETWORK]).await; peertest::scenarios::state::test_state_gossip_contract_storage_trie_node(&peertest, &target) .await; peertest.exit_all_nodes(); @@ -209,7 +215,8 @@ async fn peertest_state_offer_contract_storage_trie_node() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_state_offer_contract_bytecode() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = + setup_peertest("mainnet", &[HISTORY_NETWORK, STATE_NETWORK]).await; peertest::scenarios::state::test_state_gossip_contract_bytecode(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -218,7 +225,7 @@ async fn peertest_state_offer_contract_bytecode() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_offer_propagates_gossip() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_offer_propagates_gossip(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -227,7 +234,7 @@ async fn peertest_history_offer_propagates_gossip() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_offer_propagates_gossip_with_large_content() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_offer_propagates_gossip_with_large_content( &peertest, &target, ) @@ -239,7 +246,7 @@ async fn peertest_history_offer_propagates_gossip_with_large_content() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_offer_propagates_gossip_multiple_content_values() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_offer_propagates_gossip_multiple_content_values( &peertest, &target, ) @@ -251,7 +258,7 @@ async fn 
peertest_history_offer_propagates_gossip_multiple_content_values() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_offer_propagates_gossip_multiple_large_content_values() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::offer_accept::test_offer_propagates_gossip_multiple_large_content_values( &peertest, &target, ) @@ -263,7 +270,7 @@ async fn peertest_history_offer_propagates_gossip_multiple_large_content_values( #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_gossip_dropped_with_offer() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::gossip::test_gossip_dropped_with_offer(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -272,7 +279,7 @@ async fn peertest_history_gossip_dropped_with_offer() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_gossip_dropped_with_find_content() { - let (peertest, target, handle) = setup_peertest("mainnet").await; + let (peertest, target, handle) = setup_peertest("mainnet", &[HISTORY_NETWORK]).await; peertest::scenarios::gossip::test_gossip_dropped_with_find_content(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -281,16 +288,59 @@ async fn peertest_history_gossip_dropped_with_find_content() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_ping_cross_discv5_protocol_id() { - let (peertest, target, handle) = setup_peertest("angelfood").await; - peertest::scenarios::basic::test_ping_cross_network(&target, &peertest).await; - peertest.exit_all_nodes(); - handle.stop().unwrap(); + // Run peernodes on angelfood + let angelfood_peertest = peertest::launch_peertest_nodes(2, "angelfood", HISTORY_NETWORK).await; + + // Run a client on the mainnet, to be tested + // Use an uncommon port for the peertest to avoid clashes. + let test_discovery_port = 8999; + let external_addr = format!("{}:{test_discovery_port}", Ipv4Addr::new(127, 0, 0, 1)); + + let trin_config = TrinConfig::new_from( + [ + "trin", + "--network", + "mainnet", + "--portal-subnetworks", + HISTORY_NETWORK, + "--external-address", + external_addr.as_str(), + "--web3-ipc-path", + DEFAULT_WEB3_IPC_PATH, + "--ephemeral", + "--discovery-port", + test_discovery_port.to_string().as_ref(), + "--bootnodes", + "none", + ] + .iter(), + ) + .unwrap(); + let mainnet_handle = trin::run_trin(trin_config).await.unwrap(); + let mainnet_target = reth_ipc::client::IpcClientBuilder::default() + .build(DEFAULT_WEB3_IPC_PATH) + .await + .unwrap(); + + peertest::scenarios::basic::test_ping_cross_network( + &mainnet_target, + &angelfood_peertest.bootnode, + ) + .await; + angelfood_peertest.exit_all_nodes(); + mainnet_handle.stop().unwrap(); } -async fn setup_peertest(network: &str) -> (peertest::Peertest, Client, RpcServerHandle) { +async fn setup_peertest( + network: &str, + subnetworks: &[&str], +) -> (peertest::Peertest, Client, RpcServerHandle) { utils::init_tracing(); + + let subnetworks = subnetworks.join(","); + // Run a client, as a buddy peer for ping tests, etc. 
- let peertest = peertest::launch_peertest_nodes(2).await; + let peertest = peertest::launch_peertest_nodes(2, network, &subnetworks).await; // Short sleep to make sure all peertest nodes can connect sleep(Duration::from_millis(100)).await; @@ -303,8 +353,10 @@ async fn setup_peertest(network: &str) -> (peertest::Peertest, Client, RpcServer let trin_config = TrinConfig::new_from( [ "trin", + "--network", + network, "--portal-subnetworks", - "history,beacon,state", + subnetworks.as_str(), "--external-address", external_addr.as_str(), "--web3-ipc-path", @@ -314,8 +366,6 @@ async fn setup_peertest(network: &str) -> (peertest::Peertest, Client, RpcServer test_discovery_port.to_string().as_ref(), "--bootnodes", "none", - "--network", - network, ] .iter(), ) @@ -329,10 +379,13 @@ async fn setup_peertest(network: &str) -> (peertest::Peertest, Client, RpcServer (peertest, target, test_client_rpc_handle) } -async fn setup_peertest_bridge() -> (Peertest, HttpClient, RpcServerHandle) { +async fn setup_peertest_bridge(subnetworks: &[&str]) -> (Peertest, HttpClient, RpcServerHandle) { utils::init_tracing(); + + let network = "mainnet"; + let subnetworks = subnetworks.join(","); // Run a client, as a buddy peer for ping tests, etc. - let peertest = peertest::launch_peertest_nodes(1).await; + let peertest = peertest::launch_peertest_nodes(1, network, &subnetworks).await; // Short sleep to make sure all peertest nodes can connect sleep(Duration::from_millis(100)).await; @@ -348,8 +401,10 @@ async fn setup_peertest_bridge() -> (Peertest, HttpClient, RpcServerHandle) { let trin_config = TrinConfig::new_from( [ "trin", + "--network", + network, "--portal-subnetworks", - "history,beacon,state", + subnetworks.as_str(), "--external-address", external_addr.as_str(), // Run bridge test with http, since bridge doesn't support ipc yet. 
@@ -377,7 +432,7 @@ async fn setup_peertest_bridge() -> (Peertest, HttpClient, RpcServerHandle) { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_history_bridge() { - let (peertest, target, handle) = setup_peertest_bridge().await; + let (peertest, target, handle) = setup_peertest_bridge(&[HISTORY_NETWORK]).await; peertest::scenarios::bridge::test_history_bridge(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); @@ -386,7 +441,7 @@ async fn peertest_history_bridge() { #[tokio::test(flavor = "multi_thread")] #[serial] async fn peertest_beacon_bridge() { - let (peertest, target, handle) = setup_peertest_bridge().await; + let (peertest, target, handle) = setup_peertest_bridge(&[BEACON_NETWORK]).await; peertest::scenarios::bridge::test_beacon_bridge(&peertest, &target).await; peertest.exit_all_nodes(); handle.stop().unwrap(); diff --git a/trin-beacon/src/storage.rs b/trin-beacon/src/storage.rs index 4bae96a99..a588ad3fe 100644 --- a/trin-beacon/src/storage.rs +++ b/trin-beacon/src/storage.rs @@ -31,7 +31,7 @@ use trin_storage::{ LC_UPDATE_TOTAL_SIZE_QUERY, TOTAL_DATA_SIZE_QUERY_BEACON, }, utils::get_total_size_of_directory_in_bytes, - ContentStore, DataSize, PortalStorageConfig, ShouldWeStoreContent, BYTES_IN_MB_U64, + ContentStore, DataSize, PortalStorageConfig, ShouldWeStoreContent, }; /// Store ephemeral light client data in memory @@ -137,7 +137,6 @@ impl BeaconStorageCache { pub struct BeaconStorage { node_data_dir: PathBuf, sql_connection_pool: Pool, - storage_capacity_in_bytes: u64, metrics: StorageMetricsReporter, cache: BeaconStorageCache, } @@ -329,16 +328,10 @@ impl BeaconStorage { let storage = Self { node_data_dir: config.node_data_dir, sql_connection_pool: config.sql_connection_pool, - storage_capacity_in_bytes: config.storage_capacity_mb * BYTES_IN_MB_U64, metrics: StorageMetricsReporter::new(ProtocolId::Beacon), cache: BeaconStorageCache::new(), }; - // Report current storage capacity. - storage - .metrics - .report_storage_capacity_bytes(storage.storage_capacity_in_bytes as f64); - // Report current total storage usage. 
let total_storage_usage = storage.get_total_storage_usage_in_bytes_on_disk()?; storage diff --git a/trin-storage/src/config.rs b/trin-storage/src/config.rs index 27bf133ae..e3e510ebe 100644 --- a/trin-storage/src/config.rs +++ b/trin-storage/src/config.rs @@ -7,6 +7,8 @@ use r2d2_sqlite::SqliteConnectionManager; use crate::{error::ContentStoreError, utils::setup_sql, DistanceFunction}; +const BYTES_IN_MB_U64: u64 = 1000 * 1000; + /// Factory for creating [PortalStorageConfig] instances pub struct PortalStorageConfigFactory { node_id: NodeId, @@ -18,7 +20,7 @@ pub struct PortalStorageConfigFactory { impl PortalStorageConfigFactory { const HISTORY_CAPACITY_WEIGHT: u64 = 1; - const STATE_CAPACITY_WEIGHT: u64 = 100; + const STATE_CAPACITY_WEIGHT: u64 = 99; const BEACON_CAPACITY_WEIGHT: u64 = 0; // Beacon doesn't care about given capacity pub fn new( @@ -45,13 +47,13 @@ impl PortalStorageConfigFactory { pub fn create(&self, subnetwork: &str) -> PortalStorageConfig { let capacity_weight = Self::get_capacity_weight(subnetwork); - let capacity_mb = if self.total_capacity_weight == 0 { + let capacity_bytes = if self.total_capacity_weight == 0 { 0 } else { - self.total_capacity_mb * capacity_weight / self.total_capacity_weight + BYTES_IN_MB_U64 * self.total_capacity_mb * capacity_weight / self.total_capacity_weight }; PortalStorageConfig { - storage_capacity_mb: capacity_mb, + storage_capacity_bytes: capacity_bytes, node_id: self.node_id, node_data_dir: self.node_data_dir.clone(), distance_fn: DistanceFunction::Xor, @@ -71,7 +73,7 @@ impl PortalStorageConfigFactory { #[derive(Clone)] pub struct PortalStorageConfig { - pub storage_capacity_mb: u64, + pub storage_capacity_bytes: u64, pub node_id: NodeId, pub node_data_dir: PathBuf, pub distance_fn: DistanceFunction, diff --git a/trin-storage/src/lib.rs b/trin-storage/src/lib.rs index 5163910d3..45a4ca7c0 100644 --- a/trin-storage/src/lib.rs +++ b/trin-storage/src/lib.rs @@ -18,7 +18,6 @@ use std::{ops::Deref, str::FromStr}; pub use config::{PortalStorageConfig, PortalStorageConfigFactory}; pub const DATABASE_NAME: &str = "trin.sqlite"; -pub const BYTES_IN_MB_U64: u64 = 1000 * 1000; // TODO: Replace enum with generic type parameter. This will require that we have a way to // associate a "find farthest" query with the generic Metric. 
diff --git a/trin-storage/src/versioned/id_indexed_v1/config.rs b/trin-storage/src/versioned/id_indexed_v1/config.rs
index 1d01d6208..56f369829 100644
--- a/trin-storage/src/versioned/id_indexed_v1/config.rs
+++ b/trin-storage/src/versioned/id_indexed_v1/config.rs
@@ -5,7 +5,7 @@ use ethportal_api::types::portal_wire::ProtocolId;
 use r2d2::Pool;
 use r2d2_sqlite::SqliteConnectionManager;
 
-use crate::{versioned::ContentType, DistanceFunction, PortalStorageConfig, BYTES_IN_MB_U64};
+use crate::{versioned::ContentType, DistanceFunction, PortalStorageConfig};
 
 use super::pruning_strategy::PruningConfig;
 
@@ -33,7 +33,7 @@ impl IdIndexedV1StoreConfig {
             network,
             node_id: config.node_id,
             node_data_dir: config.node_data_dir,
-            storage_capacity_bytes: config.storage_capacity_mb * BYTES_IN_MB_U64,
+            storage_capacity_bytes: config.storage_capacity_bytes,
             sql_connection_pool: config.sql_connection_pool,
             distance_fn: config.distance_fn,
             // consider making this a parameter if we start using non-default value

From 2f99827cebadd452e5b4c414986d21f8767ca349 Mon Sep 17 00:00:00 2001
From: morph <82043364+morph-dev@users.noreply.github.com>
Date: Tue, 20 Aug 2024 23:39:04 +0300
Subject: [PATCH 3/3] fix: pr comments

---
 ethportal-peertest/src/scenarios/basic.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/ethportal-peertest/src/scenarios/basic.rs b/ethportal-peertest/src/scenarios/basic.rs
index 6ecc4549e..d5f599db9 100644
--- a/ethportal-peertest/src/scenarios/basic.rs
+++ b/ethportal-peertest/src/scenarios/basic.rs
@@ -135,8 +135,8 @@ pub async fn test_ping(protocol: ProtocolId, target: &Client, peertest: &Peertes
 
 pub async fn test_ping_cross_network(mainnet_target: &Client, angelfood_node: &PeertestNode) {
     info!("Testing ping for history cross mainnet and angelfood discv5 protocol id");
-    let bootnode_enr = angelfood_node.enr.clone();
-    if let Ok(pong) = HistoryNetworkApiClient::ping(mainnet_target, bootnode_enr).await {
+    let angelfood_enr = angelfood_node.enr.clone();
+    if let Ok(pong) = HistoryNetworkApiClient::ping(mainnet_target, angelfood_enr).await {
         panic!("Expected ping to fail as mainnet/angelfood history nodes shouldn't be able to communicate {pong:?}");
     };
 }
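
Taken together, the three patches leave the capacity split expressed in bytes, with weights history = 1, state = 99 and beacon = 0 (beacon ignores its assigned capacity). A small standalone check of the resulting arithmetic; the helper below mirrors the final create() logic but is illustrative, not code from the patches:

    // Illustrative sketch of the final capacity split after patch 2 (byte units, weights 1/99/0).
    const BYTES_IN_MB: u64 = 1000 * 1000;

    fn subnetwork_capacity_bytes(total_mb: u64, weight: u64, total_weight: u64) -> u64 {
        if total_weight == 0 {
            0
        } else {
            BYTES_IN_MB * total_mb * weight / total_weight
        }
    }

    fn main() {
        let total_weight = 1 + 99; // history + state enabled; beacon contributes 0
        assert_eq!(subnetwork_capacity_bytes(1000, 1, total_weight), 10_000_000); // history: 10 MB
        assert_eq!(subnetwork_capacity_bytes(1000, 99, total_weight), 990_000_000); // state: 990 MB
        assert_eq!(subnetwork_capacity_bytes(1000, 0, total_weight), 0); // beacon
    }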