diff --git a/Cargo.lock b/Cargo.lock index 68daf2f3e10..30368383981 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10226,6 +10226,9 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", + "bincode", + "hex", + "itertools 0.10.5", "test-log", "thiserror", "tokio", @@ -10239,6 +10242,8 @@ dependencies = [ "zksync_shared_metrics", "zksync_system_constants", "zksync_types", + "zksync_utils", + "zksync_web3_decl", ] [[package]] diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 048d8fbfd10..9a1e46f04ee 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -287,6 +287,7 @@ impl MainNodeBuilder { .as_ref() .and_then(|x| Some(x.gas_adjuster?.settlement_mode)) .unwrap_or(SettlementMode::SettlesToL1), + self.genesis_config.l2_chain_id, )); Ok(self) } diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs index aa7c4967033..1997db6f0b6 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -190,7 +190,7 @@ pub struct Filter { } #[derive(Default, Debug, PartialEq, Clone)] -pub struct ValueOrArray(Vec); +pub struct ValueOrArray(pub Vec); impl ValueOrArray { pub fn flatten(self) -> Vec { @@ -198,6 +198,12 @@ impl ValueOrArray { } } +impl From for ValueOrArray { + fn from(value: T) -> Self { + Self(vec![value]) + } +} + impl Serialize for ValueOrArray where T: Serialize, diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs index a8f4a034fb9..9bb8764cd66 100644 --- a/core/lib/constants/src/message_root.rs +++ b/core/lib/constants/src/message_root.rs @@ -1,5 +1,14 @@ -// Position of `FullTree::_height` in `MessageRoot`'s storage layout. +/// Position of `chainCount` in `MessageRoot`'s storage layout. +pub const CHAIN_COUNT_KEY: usize = 0; + +/// Position of `chainIndexToId` in `MessageRoot`'s storage layout. 
+pub const CHAIN_INDEX_TO_ID_KEY: usize = 2; + +/// Position of `FullTree::_height` in `MessageRoot`'s storage layout. pub const AGG_TREE_HEIGHT_KEY: usize = 3; -// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. +/// Position of `FullTree::nodes` in `MessageRoot`'s storage layout. pub const AGG_TREE_NODES_KEY: usize = 5; + +/// Position of `chainTree` in `MessageRoot`'s storage layout. +pub const CHAIN_TREE_KEY: usize = 7; diff --git a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json index b8d6482ea74..32a2212dfdf 100644 --- a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json +++ b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json @@ -11,7 +11,8 @@ "kind": { "Enum": [ "ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json new file mode 100644 index 00000000000..adbd2c0931e --- /dev/null +++ b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n batch_chain_merkle_path\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "batch_chain_merkle_path", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4" +} diff --git a/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json 
b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json new file mode 100644 index 00000000000..69dd87a6c35 --- /dev/null +++ b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n local_root\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "local_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b" +} diff --git a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json index e2a808d41f8..8bab74d20f5 100644 --- a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json +++ b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json @@ -17,7 +17,8 @@ "kind": { "Enum": [ "ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json new file mode 100644 index 00000000000..90623e77e98 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE\n l1_batches\n SET\n batch_chain_merkle_path = $2\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7" +} diff --git 
a/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json new file mode 100644 index 00000000000..751d272b0b0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l2_l1_merkle_root\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0" +} diff --git a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json index 61832d25fd2..5e2ea45e0bc 100644 --- a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json +++ b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json @@ -11,7 +11,8 @@ "kind": { "Enum": [ "ProtocolUpgrades", - "PriorityTransactions" + "PriorityTransactions", + "ChainBatchRoot" ] } } diff --git a/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json new file mode 100644 index 00000000000..9f7de50539b --- /dev/null +++ b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number, l2_l1_merkle_root\n FROM\n l1_batches\n JOIN eth_txs ON eth_txs.id = l1_batches.eth_execute_tx_id\n WHERE\n batch_chain_merkle_path IS NOT NULL\n AND chain_id = $1\n ORDER BY number\n ", + 
"describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l2_l1_merkle_root", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98" +} diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql new file mode 100644 index 00000000000..da7142b8f81 --- /dev/null +++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE l1_batches + DROP COLUMN batch_chain_merkle_path BYTEA; diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql new file mode 100644 index 00000000000..8b133f70904 --- /dev/null +++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + ADD COLUMN batch_chain_merkle_path BYTEA; + +-- postgres doesn't allow dropping enum variant, so nothing is done in down.sql +ALTER TYPE event_type ADD VALUE 'ChainBatchRoot'; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index f1419865601..0935ee245b7 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -21,9 +21,9 @@ use zksync_types::{ }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, fee_model::BatchFeeInput, - l2_to_l1_log::UserL2ToL1Log, + l2_to_l1_log::{BatchAndChainMerklePath, UserL2ToL1Log}, writes::TreeWrite, - Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, + Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, SLChainId, H256, U256, }; use zksync_vm_interface::CircuitStatistic; @@ -2004,6 +2004,150 @@ impl BlocksDal<'_, '_> { Ok(Some((H256::from_slice(&hash), row.timestamp as u64))) 
} + pub async fn get_l1_batch_local_root( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + local_root + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_local_root") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let Some(local_root) = row.local_root else { + return Ok(None); + }; + Ok(Some(H256::from_slice(&local_root))) + } + + pub async fn get_l1_batch_l2_l1_merkle_root( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + l2_l1_merkle_root + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_l2_l1_merkle_root") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let Some(l2_l1_merkle_root) = row.l2_l1_merkle_root else { + return Ok(None); + }; + Ok(Some(H256::from_slice(&l2_l1_merkle_root))) + } + + pub async fn get_l1_batch_chain_merkle_path( + &mut self, + number: L1BatchNumber, + ) -> DalResult> { + let Some(row) = sqlx::query!( + r#" + SELECT + batch_chain_merkle_path + FROM + l1_batches + WHERE + number = $1 + "#, + i64::from(number.0) + ) + .instrument("get_l1_batch_chain_merkle_path") + .with_arg("number", &number) + .fetch_optional(self.storage) + .await? 
+ else { + return Ok(None); + }; + let Some(batch_chain_merkle_path) = row.batch_chain_merkle_path else { + return Ok(None); + }; + Ok(Some( + bincode::deserialize(&batch_chain_merkle_path).unwrap(), + )) + } + + pub async fn get_executed_batch_roots_on_sl( + &mut self, + sl_chain_id: SLChainId, + ) -> DalResult> { + let result = sqlx::query!( + r#" + SELECT + number, l2_l1_merkle_root + FROM + l1_batches + JOIN eth_txs ON eth_txs.id = l1_batches.eth_execute_tx_id + WHERE + batch_chain_merkle_path IS NOT NULL + AND chain_id = $1 + ORDER BY number + "#, + sl_chain_id.0 as i64 + ) + .instrument("get_executed_batch_roots_on_sl") + .with_arg("sl_chain_id", &sl_chain_id) + .fetch_all(self.storage) + .await? + .into_iter() + .map(|row| { + let number = L1BatchNumber(row.number as u32); + let root = H256::from_slice(&row.l2_l1_merkle_root.unwrap()); + (number, root) + }) + .collect(); + Ok(result) + } + + pub async fn set_batch_chain_merkle_path( + &mut self, + number: L1BatchNumber, + proof: BatchAndChainMerklePath, + ) -> DalResult<()> { + let proof_bin = bincode::serialize(&proof).unwrap(); + sqlx::query!( + r#" + UPDATE + l1_batches + SET + batch_chain_merkle_path = $2 + WHERE + number = $1 + "#, + i64::from(number.0), + &proof_bin + ) + .instrument("set_batch_chain_merkle_path") + .with_arg("number", &number) + .execute(self.storage) + .await?; + + Ok(()) + } + pub async fn get_l1_batch_metadata( &mut self, number: L1BatchNumber, diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs index 062ad47219d..84061a03650 100644 --- a/core/lib/dal/src/eth_watcher_dal.rs +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -12,6 +12,7 @@ pub struct EthWatcherDal<'a, 'c> { pub enum EventType { ProtocolUpgrades, PriorityTransactions, + ChainBatchRoot, } impl EthWatcherDal<'_, '_> { diff --git a/core/lib/mini_merkle_tree/src/lib.rs b/core/lib/mini_merkle_tree/src/lib.rs index 64441c7d500..318f73acb84 100644 --- a/core/lib/mini_merkle_tree/src/lib.rs +++ 
b/core/lib/mini_merkle_tree/src/lib.rs @@ -170,7 +170,7 @@ where /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. /// `index` is relative to the leftmost uncached leaf. /// # Panics - /// Panics if `index` is >= than the number of leaves in the tree. + /// Panics if `index` is >= than the number of uncached leaves in the tree. pub fn merkle_root_and_path(&self, index: usize) -> (H256, Vec) { assert!(index < self.hashes.len(), "leaf index out of bounds"); let mut end_path = vec![]; @@ -181,6 +181,15 @@ where ) } + /// Returns the root hash and the Merkle proof for a leaf with the specified 0-based `index`. + /// `index` is an absolute position of the leaf. + /// # Panics + /// Panics if leaf at `index` is cached or if `index` is >= than the number of leaves in the tree. + pub fn merkle_root_and_path_by_absolute_index(&self, index: usize) -> (H256, Vec) { + assert!(index >= self.start_index, "leaf is cached"); + self.merkle_root_and_path(index - self.start_index) + } + /// Returns the root hash and the Merkle proofs for a range of leafs. /// The range is 0..length, where `0` is the leftmost untrimmed leaf (i.e. leaf under `self.start_index`). 
/// # Panics diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index ff24667aa2e..5f36451b976 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -9,7 +9,6 @@ use zksync_basic_types::{ Bloom, L1BatchNumber, H160, H256, H64, U256, U64, }; use zksync_contracts::BaseSystemContractsHashes; -use zksync_utils::u256_to_h256; pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, @@ -197,23 +196,6 @@ pub struct L2ToL1LogProof { pub root: H256, } -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct LeafAggProof { - pub leaf_chain_proof: LeafChainProof, - pub chain_agg_proof: ChainAggProof, - pub local_msg_root: H256, - pub sl_batch_number: U256, - pub sl_chain_id: U256, -} - -#[derive(Debug, Serialize, Deserialize, Clone)] -#[serde(rename_all = "camelCase")] -pub struct LeafChainProof { - pub batch_leaf_proof: Vec, - pub batch_leaf_proof_mask: U256, -} - #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "camelCase")] pub struct ChainAggProof { @@ -221,43 +203,6 @@ pub struct ChainAggProof { pub chain_id_leaf_proof_mask: U256, } -impl LeafAggProof { - pub fn encode(self) -> (u32, Vec) { - let mut encoded_result = vec![]; - - let LeafAggProof { - leaf_chain_proof, - chain_agg_proof, - sl_batch_number, - sl_chain_id, - .. 
- } = self; - - let LeafChainProof { - batch_leaf_proof, - batch_leaf_proof_mask, - } = leaf_chain_proof; - - let ChainAggProof { - chain_id_leaf_proof: _, - chain_id_leaf_proof_mask, - } = chain_agg_proof; - - let batch_leaf_proof_len = batch_leaf_proof.len() as u32; - - encoded_result.push(u256_to_h256(batch_leaf_proof_mask)); - encoded_result.extend(batch_leaf_proof); - - let sl_encoded_data = - sl_batch_number * U256::from(2).pow(128.into()) + chain_id_leaf_proof_mask; - encoded_result.push(u256_to_h256(sl_encoded_data)); - - encoded_result.push(u256_to_h256(sl_chain_id)); - - (batch_leaf_proof_len, encoded_result) - } -} - /// A struct with the two default bridge contracts. #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -530,6 +475,45 @@ impl Log { } } +impl From for zksync_basic_types::web3::Log { + fn from(log: Log) -> Self { + zksync_basic_types::web3::Log { + address: log.address, + topics: log.topics, + data: log.data, + block_hash: log.block_hash, + block_number: log.block_number, + transaction_hash: log.transaction_hash, + transaction_index: log.transaction_index, + log_index: log.log_index, + transaction_log_index: log.transaction_log_index, + log_type: log.log_type, + removed: log.removed, + block_timestamp: log.block_timestamp, + } + } +} + +impl From for Log { + fn from(log: zksync_basic_types::web3::Log) -> Self { + Log { + address: log.address, + topics: log.topics, + data: log.data, + block_hash: log.block_hash, + block_number: log.block_number, + transaction_hash: log.transaction_hash, + transaction_index: log.transaction_index, + log_index: log.log_index, + transaction_log_index: log.transaction_log_index, + log_type: log.log_type, + removed: log.removed, + block_timestamp: log.block_timestamp, + l1_batch_number: None, + } + } +} + /// A log produced by a transaction. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 957cfa9a1a6..566f941ff77 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -79,6 +79,24 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BatchAndChainMerklePath { + pub batch_proof_len: u32, + pub proof: Vec, +} + +pub const LOG_PROOF_SUPPORTED_METADATA_VERSION: u8 = 1; + +pub const BATCH_LEAF_PADDING: H256 = H256([ + 0xd8, 0x2f, 0xec, 0x4a, 0x37, 0xcb, 0xdc, 0x47, 0xf1, 0xe5, 0xcc, 0x4a, 0xd6, 0x4d, 0xea, 0xcf, + 0x34, 0xa4, 0x8e, 0x6f, 0x7c, 0x61, 0xfa, 0x5b, 0x68, 0xfd, 0x58, 0xe5, 0x43, 0x25, 0x9c, 0xf4, +]); + +pub const CHAIN_ID_LEAF_PADDING: H256 = H256([ + 0x39, 0xbc, 0x69, 0x36, 0x3b, 0xb9, 0xe2, 0x6c, 0xf1, 0x42, 0x40, 0xde, 0x4e, 0x22, 0x56, 0x9e, + 0x95, 0xcf, 0x17, 0x5c, 0xfb, 0xcf, 0x1a, 0xde, 0x1a, 0x47, 0xa2, 0x53, 0xb4, 0xbf, 0x7f, 0x61, +]); + /// Returns the blob hashes parsed out from the system logs pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, diff --git a/core/lib/web3_decl/src/namespaces/unstable.rs b/core/lib/web3_decl/src/namespaces/unstable.rs index e6b36dd2684..f666f02f281 100644 --- a/core/lib/web3_decl/src/namespaces/unstable.rs +++ b/core/lib/web3_decl/src/namespaces/unstable.rs @@ -2,9 +2,9 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, tee_types::TeeType, - L1BatchNumber, H256, + L1BatchNumber, L2ChainId, H256, }; use crate::client::{ForWeb3Network, L2}; @@ -31,4 +31,11 @@ pub trait UnstableNamespace { l1_batch_number: L1BatchNumber, tee_type: Option, ) -> RpcResult>; + + #[method(name = "getChainLogProof")] + async fn 
get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> RpcResult>; } diff --git a/core/lib/web3_decl/src/namespaces/zks.rs b/core/lib/web3_decl/src/namespaces/zks.rs index 169d27aac1d..42bf9b3bed3 100644 --- a/core/lib/web3_decl/src/namespaces/zks.rs +++ b/core/lib/web3_decl/src/namespaces/zks.rs @@ -6,8 +6,8 @@ use jsonrpsee::proc_macros::rpc; use zksync_types::{ api::{ state_override::StateOverride, BlockDetails, BridgeAddresses, L1BatchDetails, - L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, Proof, ProtocolVersion, - TransactionDetailedResult, TransactionDetails, + L1ProcessingDetails, L2ToL1LogProof, Proof, ProtocolVersion, TransactionDetailedResult, + TransactionDetails, }, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, @@ -84,14 +84,6 @@ pub trait ZksNamespace { index: Option, ) -> RpcResult>; - #[method(name = "getAggBatchInclusionProof")] - async fn get_aggregated_batch_inclusion_proof( - &self, - message_root_addr: Address, - batch_number: L1BatchNumber, - chain_id: u32, - ) -> RpcResult>; - #[method(name = "L1BatchNumber")] async fn get_l1_batch_number(&self) -> RpcResult; diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 36ee48a54a1..dc79ebffce9 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -5,14 +5,11 @@ //! //! These "extensions" are required to provide more ZKsync-specific information while remaining Web3-compilant. -use core::{ - convert::{TryFrom, TryInto}, - fmt, - marker::PhantomData, -}; +use core::convert::{TryFrom, TryInto}; use rlp::Rlp; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Serialize}; +use zksync_types::web3::ValueOrArray; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, @@ -101,71 +98,6 @@ pub enum FilterChanges { Empty([u8; 0]), } -/// Either value or array of values. 
-/// -/// A value must serialize into a string. -#[derive(Default, Debug, PartialEq, Clone)] -pub struct ValueOrArray(pub Vec); - -impl From for ValueOrArray { - fn from(value: T) -> Self { - Self(vec![value]) - } -} - -impl Serialize for ValueOrArray { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match self.0.len() { - 0 => serializer.serialize_none(), - 1 => Serialize::serialize(&self.0[0], serializer), - _ => Serialize::serialize(&self.0, serializer), - } - } -} - -impl<'de, T: Deserialize<'de>> Deserialize<'de> for ValueOrArray { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - struct Visitor(PhantomData); - - impl<'de, T: Deserialize<'de>> de::Visitor<'de> for Visitor { - type Value = ValueOrArray; - - fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - formatter.write_str("string value or sequence of values") - } - - fn visit_str(self, value: &str) -> Result - where - E: de::Error, - { - use serde::de::IntoDeserializer; - - Deserialize::deserialize(value.into_deserializer()) - .map(|value| ValueOrArray(vec![value])) - } - - fn visit_seq(self, mut visitor: S) -> Result - where - S: de::SeqAccess<'de>, - { - let mut elements = Vec::with_capacity(visitor.size_hint().unwrap_or(1)); - while let Some(element) = visitor.next_element()? 
{ - elements.push(element); - } - Ok(ValueOrArray(elements)) - } - } - - deserializer.deserialize_any(Visitor(PhantomData)) - } -} - /// Filter #[derive(Default, Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Filter { @@ -185,6 +117,28 @@ pub struct Filter { pub block_hash: Option, } +impl From for Filter { + fn from(value: zksync_types::web3::Filter) -> Self { + let convert_block_number = |b: zksync_types::web3::BlockNumber| match b { + zksync_types::web3::BlockNumber::Finalized => BlockNumber::Finalized, + zksync_types::web3::BlockNumber::Safe => BlockNumber::Finalized, + zksync_types::web3::BlockNumber::Latest => BlockNumber::Latest, + zksync_types::web3::BlockNumber::Earliest => BlockNumber::Earliest, + zksync_types::web3::BlockNumber::Pending => BlockNumber::Pending, + zksync_types::web3::BlockNumber::Number(n) => BlockNumber::Number(n), + }; + let from_block = value.from_block.map(convert_block_number); + let to_block = value.to_block.map(convert_block_number); + Filter { + from_block, + to_block, + address: value.address, + topics: value.topics, + block_hash: value.block_hash, + } + } +} + /// Filter Builder #[derive(Default, Clone)] pub struct FilterBuilder { diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs index 91330aa7d94..cfa8c84b05b 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/unstable.rs @@ -1,7 +1,7 @@ use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, tee_types::TeeType, - L1BatchNumber, H256, + L1BatchNumber, L2ChainId, H256, }; use zksync_web3_decl::{ jsonrpsee::core::{async_trait, RpcResult}, @@ -30,4 +30,14 @@ impl UnstableNamespaceServer for UnstableNamespace { .await .map_err(|err| self.current_method().map_err(err)) } + + async fn 
get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> RpcResult> { + self.get_chain_log_proof_impl(l1_batch_number, chain_id) + .await + .map_err(|err| self.current_method().map_err(err)) + } } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index 6ae1fec5b94..f705efdc819 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -4,8 +4,8 @@ use zksync_multivm::interface::VmEvent; use zksync_types::{ api::{ state_override::StateOverride, ApiStorageLog, BlockDetails, BridgeAddresses, - L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, Log, Proof, - ProtocolVersion, TransactionDetailedResult, TransactionDetails, + L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, Log, Proof, ProtocolVersion, + TransactionDetailedResult, TransactionDetails, }, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, @@ -99,17 +99,6 @@ impl ZksNamespaceServer for ZksNamespace { .map_err(|err| self.current_method().map_err(err)) } - async fn get_aggregated_batch_inclusion_proof( - &self, - message_root_addr: Address, - batch_number: L1BatchNumber, - chain_id: u32, - ) -> RpcResult> { - self.get_aggregated_batch_inclusion_proof_impl(message_root_addr, batch_number, chain_id) - .await - .map_err(|err| self.current_method().map_err(err)) - } - async fn get_l1_batch_number(&self) -> RpcResult { self.get_l1_batch_number_impl() .await diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index 620e9185078..62f76b9d35f 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -16,6 +16,7 @@ use zksync_metadata_calculator::api_server::TreeApiClient; use zksync_node_sync::SyncState; use zksync_types::L2BlockNumber; use zksync_web3_decl::{ + 
client::{DynClient, L2}, jsonrpsee::{ server::{ middleware::rpc::either::Either, BatchRequestConfig, RpcServiceBuilder, ServerBuilder, @@ -137,6 +138,7 @@ struct OptionalApiParams { mempool_cache: Option, extended_tracing: bool, pub_sub_events_sender: Option>, + l2_l1_log_proof_handler: Option>>, } /// Structure capable of spawning a configured Web3 API server along with all the required @@ -296,6 +298,14 @@ impl ApiBuilder { self } + pub fn with_l2_l1_log_proof_handler( + mut self, + l2_l1_log_proof_handler: Box>, + ) -> Self { + self.optional.l2_l1_log_proof_handler = Some(l2_l1_log_proof_handler); + self + } + // Intended for tests only. #[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -379,6 +389,7 @@ impl ApiServer { last_sealed_l2_block: self.sealed_l2_block_handle, bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, + l2_l1_log_proof_handler: self.optional.l2_l1_log_proof_handler, }) } diff --git a/core/node/api_server/src/web3/namespaces/unstable.rs b/core/node/api_server/src/web3/namespaces/unstable.rs deleted file mode 100644 index 783088cdc36..00000000000 --- a/core/node/api_server/src/web3/namespaces/unstable.rs +++ /dev/null @@ -1,62 +0,0 @@ -use chrono::{DateTime, Utc}; -use zksync_dal::{CoreDal, DalError}; -use zksync_types::{ - api::{TeeProof, TransactionExecutionInfo}, - tee_types::TeeType, - L1BatchNumber, -}; -use zksync_web3_decl::{error::Web3Error, types::H256}; - -use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState}; - -#[derive(Debug)] -pub(crate) struct UnstableNamespace { - state: RpcState, -} - -impl UnstableNamespace { - pub fn new(state: RpcState) -> Self { - Self { state } - } - - pub(crate) fn current_method(&self) -> &MethodTracer { - &self.state.current_method - } - - pub async fn transaction_execution_info_impl( - &self, - hash: H256, - ) -> Result, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - Ok(storage - 
.transactions_web3_dal() - .get_unstable_transaction_execution_info(hash) - .await - .map_err(DalError::generalize)? - .map(|execution_info| TransactionExecutionInfo { execution_info })) - } - - pub async fn get_tee_proofs_impl( - &self, - l1_batch_number: L1BatchNumber, - tee_type: Option, - ) -> Result, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - Ok(storage - .tee_proof_generation_dal() - .get_tee_proofs(l1_batch_number, tee_type) - .await - .map_err(DalError::generalize)? - .into_iter() - .map(|proof| TeeProof { - l1_batch_number, - tee_type, - pubkey: proof.pubkey, - signature: proof.signature, - proof: proof.proof, - proved_at: DateTime::::from_naive_utc_and_offset(proof.updated_at, Utc), - attestation: proof.attestation, - }) - .collect::>()) - } -} diff --git a/core/node/api_server/src/web3/namespaces/unstable/mod.rs b/core/node/api_server/src/web3/namespaces/unstable/mod.rs new file mode 100644 index 00000000000..8b154a8e544 --- /dev/null +++ b/core/node/api_server/src/web3/namespaces/unstable/mod.rs @@ -0,0 +1,139 @@ +use chrono::{DateTime, Utc}; +use itertools::Itertools; +use utils::{ + chain_id_leaf_preimage, get_chain_count, get_chain_id_from_index, get_chain_root_from_id, +}; +use zksync_crypto_primitives::hasher::keccak::KeccakHasher; +use zksync_dal::{CoreDal, DalError}; +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::{ + api::{ChainAggProof, TeeProof, TransactionExecutionInfo}, + tee_types::TeeType, + L1BatchNumber, L2ChainId, +}; +use zksync_web3_decl::{error::Web3Error, types::H256}; + +use crate::web3::{backend_jsonrpsee::MethodTracer, RpcState}; + +mod utils; + +#[derive(Debug)] +pub(crate) struct UnstableNamespace { + state: RpcState, +} + +impl UnstableNamespace { + pub fn new(state: RpcState) -> Self { + Self { state } + } + + pub(crate) fn current_method(&self) -> &MethodTracer { + &self.state.current_method + } + + pub async fn transaction_execution_info_impl( + &self, + hash: H256, + ) -> 
Result, Web3Error> { + let mut storage = self.state.acquire_connection().await?; + Ok(storage + .transactions_web3_dal() + .get_unstable_transaction_execution_info(hash) + .await + .map_err(DalError::generalize)? + .map(|execution_info| TransactionExecutionInfo { execution_info })) + } + + pub async fn get_tee_proofs_impl( + &self, + l1_batch_number: L1BatchNumber, + tee_type: Option, + ) -> Result, Web3Error> { + let mut storage = self.state.acquire_connection().await?; + Ok(storage + .tee_proof_generation_dal() + .get_tee_proofs(l1_batch_number, tee_type) + .await + .map_err(DalError::generalize)? + .into_iter() + .map(|proof| TeeProof { + l1_batch_number, + tee_type, + pubkey: proof.pubkey, + signature: proof.signature, + proof: proof.proof, + proved_at: DateTime::::from_naive_utc_and_offset(proof.updated_at, Utc), + attestation: proof.attestation, + }) + .collect::>()) + } + + pub async fn get_chain_log_proof_impl( + &self, + l1_batch_number: L1BatchNumber, + l2_chain_id: L2ChainId, + ) -> Result, Web3Error> { + let mut connection = self.state.acquire_connection().await?; + self.state + .start_info + .ensure_not_pruned(l1_batch_number, &mut connection) + .await?; + + let Some((_, l2_block_number)) = connection + .blocks_dal() + .get_l2_block_range_of_l1_batch(l1_batch_number) + .await + .map_err(DalError::generalize)? 
+ else { + return Ok(None); + }; + let chain_count_integer = get_chain_count(&mut connection, l2_block_number).await?; + + let mut chain_ids = Vec::new(); + for chain_index in 0..chain_count_integer { + chain_ids.push( + get_chain_id_from_index(&mut connection, chain_index, l2_block_number).await?, + ); + } + + let Some((chain_id_leaf_proof_mask, _)) = chain_ids + .iter() + .find_position(|id| **id == H256::from_low_u64_be(l2_chain_id.0)) + else { + return Ok(None); + }; + + let mut leafs = Vec::new(); + for chain_id in chain_ids { + let chain_root = + get_chain_root_from_id(&mut connection, chain_id, l2_block_number).await?; + leafs.push(chain_id_leaf_preimage(chain_root, chain_id)); + } + + let chain_merkle_tree = + MiniMerkleTree::<[u8; 96], KeccakHasher>::new(leafs.into_iter(), None); + + let mut chain_id_leaf_proof = chain_merkle_tree + .merkle_root_and_path(chain_id_leaf_proof_mask) + .1; + + let Some(local_root) = connection + .blocks_dal() + .get_l1_batch_local_root(l1_batch_number) + .await + .map_err(DalError::generalize)? + else { + return Ok(None); + }; + + // Chain tree is the right subtree of the aggregated tree. + // We append root of the left subtree to form full proof. 
+ let chain_id_leaf_proof_mask = chain_id_leaf_proof_mask | (1 << chain_id_leaf_proof.len()); + chain_id_leaf_proof.push(local_root); + + Ok(Some(ChainAggProof { + chain_id_leaf_proof, + chain_id_leaf_proof_mask: chain_id_leaf_proof_mask.into(), + })) + } +} diff --git a/core/node/api_server/src/web3/namespaces/unstable/utils.rs b/core/node/api_server/src/web3/namespaces/unstable/utils.rs new file mode 100644 index 00000000000..6cb66569fef --- /dev/null +++ b/core/node/api_server/src/web3/namespaces/unstable/utils.rs @@ -0,0 +1,105 @@ +use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256; +use zksync_system_constants::{ + message_root::{CHAIN_COUNT_KEY, CHAIN_INDEX_TO_ID_KEY, CHAIN_TREE_KEY}, + L2_MESSAGE_ROOT_ADDRESS, +}; +use zksync_types::{ + l2_to_l1_log::CHAIN_ID_LEAF_PADDING, web3::keccak256, AccountTreeId, L2BlockNumber, StorageKey, + H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; +use zksync_web3_decl::error::Web3Error; + +pub(super) async fn get_chain_count( + connection: &mut Connection<'_, Core>, + block_number: L2BlockNumber, +) -> anyhow::Result { + let chain_count_key = CHAIN_COUNT_KEY; + let chain_count_storage_key = + message_root_log_key(H256::from_low_u64_be(chain_count_key as u64)); + let chain_count = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_count_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + if h256_to_u256(chain_count) > u8::MAX.into() { + anyhow::bail!("Chain count doesn't fit in `u8`"); + } + Ok(chain_count.0[31]) +} + +pub(super) async fn get_chain_id_from_index( + connection: &mut Connection<'_, Core>, + chain_index: u8, + block_number: L2BlockNumber, +) -> Result { + let key = H256::from_slice(&keccak256( + &[ + H256::from_low_u64_be(chain_index as u64).0, + H256::from_low_u64_be(CHAIN_INDEX_TO_ID_KEY as u64).0, + ] + .concat(), + )); + let storage_key = 
message_root_log_key(key); + let chain_id = connection + .storage_web3_dal() + .get_historical_value_unchecked(storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + Ok(chain_id) +} + +pub(super) async fn get_chain_root_from_id( + connection: &mut Connection<'_, Core>, + chain_id: H256, + block_number: L2BlockNumber, +) -> Result { + let chain_tree_key = H256::from_slice(&keccak256( + &[chain_id.0, H256::from_low_u64_be(CHAIN_TREE_KEY as u64).0].concat(), + )); + let chain_sides_len_key = + u256_to_h256(h256_to_u256(chain_tree_key).overflowing_add(U256::one()).0); + let chain_sides_len_storage_key = message_root_log_key(chain_sides_len_key); + let chain_sides_len = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_sides_len_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + + let last_element_pos = { + let length = h256_to_u256(chain_sides_len); + assert!( + length > U256::zero(), + "_sides.length is zero, chain is not registered" + ); + + length - 1 + }; + let sides_data_start_key = H256(keccak256(chain_sides_len_key.as_bytes())); + let chain_root_key = h256_to_u256(sides_data_start_key) + .overflowing_add(last_element_pos) + .0; + let chain_root_storage_key = message_root_log_key(u256_to_h256(chain_root_key)); + let chain_root = connection + .storage_web3_dal() + .get_historical_value_unchecked(chain_root_storage_key.hashed_key(), block_number) + .await + .map_err(DalError::generalize)?; + Ok(chain_root) +} + +pub(super) fn chain_id_leaf_preimage(chain_root: H256, chain_id: H256) -> [u8; 96] { + let mut full_preimage = [0u8; 96]; + + full_preimage[0..32].copy_from_slice(CHAIN_ID_LEAF_PADDING.as_bytes()); + full_preimage[32..64].copy_from_slice(&chain_root.0); + full_preimage[64..96].copy_from_slice(&chain_id.0); + + full_preimage +} + +fn message_root_log_key(key: H256) -> StorageKey { + let message_root = AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS); + 
StorageKey::new(message_root, key) +} diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index f8b374b35ba..65aee8d458c 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,7 +1,6 @@ use std::collections::HashMap; use anyhow::Context as _; -use once_cell::sync::Lazy; use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiError; @@ -10,28 +9,27 @@ use zksync_multivm::interface::VmExecutionResultAndLogs; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ api::{ - state_override::StateOverride, BlockDetails, BridgeAddresses, ChainAggProof, GetLogsFilter, - L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, LeafAggProof, LeafChainProof, Proof, - ProtocolVersion, StorageProof, TransactionDetails, + state_override::StateOverride, BlockDetails, BridgeAddresses, GetLogsFilter, + L1BatchDetails, L1ProcessingDetails, L2ToL1LogProof, Proof, ProtocolVersion, StorageProof, + TransactionDetails, }, - ethabi, fee::Fee, fee_model::{FeeParams, PubdataIndependentBatchFeeModelInput}, l1::L1Tx, l2::L2Tx, - l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log}, + l2_to_l1_log::{l2_to_l1_logs_tree_size, L2ToL1Log, LOG_PROOF_SUPPORTED_METADATA_VERSION}, tokens::ETHEREUM_ADDRESS, transaction_request::CallRequest, utils::storage_key_for_standard_token_balance, - web3::{keccak256, Bytes}, + web3::Bytes, AccountTreeId, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, StorageKey, - Transaction, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, L2_MESSAGE_ROOT_ADDRESS, + Transaction, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, U64, }; -use zksync_utils::{address_to_h256, h256_to_u256, u256_to_h256}; +use zksync_utils::{address_to_h256, h256_to_u256}; use 
zksync_web3_decl::{ client::{Client, L2}, - error::Web3Error, + error::{ClientRpcContext, Web3Error}, namespaces::{EthNamespaceClient, ZksNamespaceClient}, types::{Address, Token, H256}, }; @@ -43,17 +41,6 @@ use crate::{ web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, }; -pub static MESSAGE_ROOT_ADDED_CHAIN_BATCH_ROOT_EVENT: Lazy = Lazy::new(|| { - ethabi::long_signature( - "AppendedChainBatchRoot", - &[ - ethabi::ParamType::Uint(256), - ethabi::ParamType::Uint(256), - ethabi::ParamType::FixedBytes(32), - ], - ) -}); - #[derive(Debug)] pub(crate) struct ZksNamespace { state: RpcState, @@ -249,6 +236,14 @@ impl ZksNamespace { msg: H256, l2_log_position: Option, ) -> Result, Web3Error> { + if let Some(handler) = &self.state.l2_l1_log_proof_handler { + return handler + .get_l2_to_l1_msg_proof(block_number, sender, msg, l2_log_position) + .rpc_context("get_l2_to_l1_msg_proof") + .await + .map_err(Into::into); + } + let mut storage = self.state.acquire_connection().await?; self.state .start_info @@ -359,69 +354,61 @@ impl ZksNamespace { .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); let tree_size = l2_to_l1_logs_tree_size(protocol_version); - let (root, mut proof) = MiniMerkleTree::new(merkle_tree_leaves, Some(tree_size)) + let (local_root, proof) = MiniMerkleTree::new(merkle_tree_leaves, Some(tree_size)) .merkle_root_and_path(l1_log_index); - // FIXME Definitely refactor all of it - // For now it is always 0 - let aggregated_root = batch_meta.metadata.aggregation_root.unwrap(); - let final_root = KeccakHasher.compress(&root, &aggregated_root); - proof.push(aggregated_root); - - println!("\n\nTrying to get the final proof! 
{}\n\n", l1_batch_number); - - const EXPECTED_SYNC_LAYER_CHAIN_ID: u64 = 505; - - let mut log_leaf_proof = LogLeafProof::new(proof); - - let settlement_layer: u64 = self.state.api_config.sl_chain_id.0; - - if settlement_layer == EXPECTED_SYNC_LAYER_CHAIN_ID { - println!("\nI am on sync layer!!\n"); - // We are on top of sync layer. - // Maaybe there is an aggregation proof waiting - - // Create a client for pinging the RPC. - let client: Client = Client::http( - self.state - .api_config - .settlement_layer_url - .clone() - .unwrap() - .parse() - .unwrap(), - )? - .for_network(L2::from(L2ChainId(self.state.api_config.l1_chain_id.0))) - .build(); - - println!("\ncreated client!!\n"); - - let proof = client - .get_aggregated_batch_inclusion_proof( - L2_MESSAGE_ROOT_ADDRESS, - l1_batch_number, - self.state.api_config.l2_chain_id.0 as u32, - ) - .await - .unwrap_or_else(|err| { - panic!("Failed reaching to the SL: {:#?}", err); - }); - println!("Proof: {:#?}", proof); + if protocol_version.is_pre_gateway() { + return Ok(Some(L2ToL1LogProof { + proof, + root: local_root, + id: l1_log_index as u32, + })); + } - if let Some(proof) = proof { - println!("Found proof for my own batch"); + let aggregated_root = batch_meta + .metadata + .aggregation_root + .expect("`aggregation_root` must be present for post-gateway branch"); + let root = KeccakHasher.compress(&local_root, &aggregated_root); - log_leaf_proof.append_aggregation_layer(proof); - } else { + let mut log_leaf_proof = proof; + log_leaf_proof.push(aggregated_root); + + let settlement_layer_chain_id = self.state.api_config.sl_chain_id.0; + let l1_chain_id = self.state.api_config.l1_chain_id.0; + + let (batch_proof_len, batch_chain_proof) = if settlement_layer_chain_id != l1_chain_id { + let Some(batch_chain_proof) = storage + .blocks_dal() + .get_l1_batch_chain_merkle_path(l1_batch_number) + .await + .map_err(DalError::generalize)? 
+ else { return Ok(None); - } - } + }; + + (batch_chain_proof.batch_proof_len, batch_chain_proof.proof) + } else { + (0, Vec::new()) + }; + + let proof = { + let mut metadata = [0u8; 32]; + metadata[0] = LOG_PROOF_SUPPORTED_METADATA_VERSION; + metadata[1] = log_leaf_proof.len() as u8; + metadata[2] = batch_proof_len as u8; - let proof = log_leaf_proof.encode(); + let mut result = vec![H256(metadata)]; + + result.extend(log_leaf_proof); + result.extend(batch_chain_proof); + + result + }; Ok(Some(L2ToL1LogProof { proof, - root: final_root, + root, id: l1_log_index as u32, })) } @@ -431,6 +418,14 @@ impl ZksNamespace { tx_hash: H256, index: Option, ) -> Result, Web3Error> { + if let Some(handler) = &self.state.l2_l1_log_proof_handler { + return handler + .get_l2_to_l1_log_proof(tx_hash, index) + .rpc_context("get_l2_to_l1_log_proof") + .await + .map_err(Into::into); + } + let mut storage = self.state.acquire_connection().await?; let Some((l1_batch_number, l1_batch_tx_index)) = storage .blocks_web3_dal() @@ -441,6 +436,11 @@ impl ZksNamespace { return Ok(None); }; + self.state + .start_info + .ensure_not_pruned(l1_batch_number, &mut storage) + .await?; + let log_proof = self .get_l2_to_l1_log_proof_inner( &mut storage, @@ -452,401 +452,6 @@ impl ZksNamespace { Ok(log_proof) } - async fn l1_batch_number_with_agg_batch( - &self, - storage: &mut Connection<'_, Core>, - latest_sealed_block_number: L2BlockNumber, - message_root_addr: Address, - batch_number: L1BatchNumber, - chain_id: u32, - ) -> Result, Web3Error> { - let add_chain_logs = storage - .events_web3_dal() - .get_logs( - GetLogsFilter { - // FIXME: this is somewhat inefficient, better ways need to be created - from_block: 0.into(), - to_block: latest_sealed_block_number, - addresses: vec![message_root_addr], - topics: vec![ - (1, vec![*MESSAGE_ROOT_ADDED_CHAIN_BATCH_ROOT_EVENT]), - (2, vec![u256_to_h256(U256::from(chain_id))]), - (3, vec![u256_to_h256(U256::from(batch_number.0))]), - ], - }, - 
self.state.api_config.req_entities_limit, - ) - .await - .map_err(DalError::generalize)?; - - println!("LOGS = {:#?}", add_chain_logs); - - // At most one such log is expected - assert!(add_chain_logs.len() <= 1); - - if add_chain_logs.is_empty() { - return Ok(None); - } - - let Some(l1_batch_number) = add_chain_logs[0].l1_batch_number else { - return Ok(None); - }; - - Ok(Some(l1_batch_number.as_u32())) - } - - // FIXME: `message_root_addr` is most often constant. The only reason we may want to provide a custom value is - // for L1, but at this point maybe it could known from the config, not sure. - pub async fn get_aggregated_batch_inclusion_proof_impl( - &self, - message_root_addr: Address, - searched_batch_number: L1BatchNumber, - searched_chain_id: u32, - ) -> Result, Web3Error> { - println!("heee"); - let mut storage = self.state.acquire_connection().await?; - // Proofs only available for finalized batches - let latest_sealed_block_number = storage - .blocks_dal() - .get_last_sealed_l2_block_header() - .await - .map_err(DalError::generalize)? - .map(|header| header.number) - .unwrap_or_default(); - - let l1_batch_number_with_agg_batch = self - .l1_batch_number_with_agg_batch( - &mut storage, - latest_sealed_block_number, - message_root_addr, - searched_batch_number, - searched_chain_id, - ) - .await?; - println!("hee2"); - let Some(l1_batch_number_with_agg_batch) = l1_batch_number_with_agg_batch else { - return Ok(None); - }; - println!("\n\nhee3 -- {}\n\n", l1_batch_number_with_agg_batch); - - let local_msg_root = storage - .blocks_dal() - .get_l1_batch_metadata(L1BatchNumber(l1_batch_number_with_agg_batch)) - .await - .map_err(DalError::generalize)? 
- .and_then(|metadata| metadata.metadata.local_root); - - let Some(local_msg_root) = local_msg_root else { - return Ok(None); - }; - - let batch_proof = self - .get_batch_inclusion_proof_impl( - message_root_addr, - searched_batch_number, - searched_chain_id, - latest_sealed_block_number, - l1_batch_number_with_agg_batch, - ) - .await?; - let Some(leaf_proof) = batch_proof else { - return Ok(None); - }; - let correct_l2_block_number = storage - .blocks_dal() - .get_l2_block_range_of_l1_batch(L1BatchNumber(l1_batch_number_with_agg_batch)) - .await - .map_err(DalError::generalize)?; - let Some((_, max_l2_block_number)) = correct_l2_block_number else { - return Ok(None); - }; - - let chain_proof = self - .get_chain_inclusion_proof_impl( - message_root_addr, - searched_chain_id, - max_l2_block_number, - local_msg_root, - ) - .await?; - let result = LeafAggProof { - leaf_chain_proof: leaf_proof, - chain_agg_proof: chain_proof.unwrap(), - local_msg_root, - sl_batch_number: l1_batch_number_with_agg_batch.into(), - // this is the settlement layer - sl_chain_id: self.state.api_config.l2_chain_id.0.into(), - }; - - Ok(Some(result)) - } - - pub async fn get_batch_inclusion_proof_impl( - &self, - message_root_addr: Address, - searched_batch_number: L1BatchNumber, - searched_chain_id: u32, - latest_sealed_block_number: L2BlockNumber, - l1_batch_number_with_agg_batch: u32, - ) -> Result, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - - // FIXME: move as api config - // Firstly, let's grab all events that correspond to batch being inserted into the chain_id tree. 
- println!("hee4"); - let add_batch_logs = storage - .events_web3_dal() - .get_logs( - GetLogsFilter { - // FIXME: this is somewhat inefficient, better ways need to be created - from_block: 0.into(), - to_block: latest_sealed_block_number, - addresses: vec![message_root_addr], - topics: vec![(1, vec![*MESSAGE_ROOT_ADDED_CHAIN_BATCH_ROOT_EVENT])], - }, - // FIXME: this is a bit inefficient, better ways need to be created - i32::MAX as usize, - ) - .await - .map_err(DalError::generalize)?; - - println!("Add batch logs: {:#?}", add_batch_logs); - - let mut batch_leaf_proof_mask = None; - - let mut chain_id_merkle_tree = - MiniMerkleTree::<[u8; 96], KeccakHasher>::new(Vec::<[u8; 96]>::new().into_iter(), None); - let mut cnt = 0; - - for add_batch_log in add_batch_logs.iter() { - let Some(batch_num) = add_batch_log.l1_batch_number else { - continue; - }; - let batch_num: u32 = batch_num.as_u32(); - - if batch_num > l1_batch_number_with_agg_batch { - continue; - }; - - let chain_id = h256_to_u256(add_batch_log.topics[1]); - let batch_number = h256_to_u256(add_batch_log.topics[2]); - - if chain_id.as_u32() != searched_chain_id { - continue; - } - - if batch_number.as_u32() == searched_batch_number.0 { - println!("relevant batch found! 
{:#?}", add_batch_log); - batch_leaf_proof_mask = Some(cnt); - } - - println!("appended log: {:#?}", add_batch_log); - - let batch_root = H256::from_slice(&add_batch_log.data.0); - chain_id_merkle_tree.push(Self::batch_leaf_preimage(batch_root, batch_number.as_u32())); - - println!("new batch root = {:#?}", chain_id_merkle_tree.merkle_root()); - - cnt += 1; - } - let Some(batch_leaf_proof_mask) = batch_leaf_proof_mask else { - return Ok(None); - }; - - let result = chain_id_merkle_tree.merkle_root_and_path(batch_leaf_proof_mask); - let root = result.0; - let batch_leaf_proof = result.1; - - println!( - "EXPECTED ROOT FOR {} / {} = {}", - searched_chain_id, batch_leaf_proof_mask, root - ); - - Ok(Some(LeafChainProof { - batch_leaf_proof, - batch_leaf_proof_mask: batch_leaf_proof_mask.into(), - })) - } - - pub async fn get_chain_inclusion_proof_impl( - &self, - message_root_addr: Address, - searched_chain_id: u32, - block_number: L2BlockNumber, - local_msg_root: H256, - ) -> Result, Web3Error> { - let mut storage = self.state.acquire_connection().await?; - let storage_key_num = U256::zero(); // kl todo - let storage_key = StorageKey::new( - AccountTreeId::new(message_root_addr), - u256_to_h256(storage_key_num), - ); - let chain_count = storage - .storage_web3_dal() - .get_historical_value_unchecked(storage_key.hashed_key(), block_number) - .await - .map_err(DalError::generalize)?; - let chain_count_integer = chain_count.0[31]; - println!("kl todo chain count = {:#?}", chain_count_integer); - - let mut full_chain_merkle_tree = - MiniMerkleTree::<[u8; 96], KeccakHasher>::new(Vec::<[u8; 96]>::new().into_iter(), None); - let mut chain_id_leaf_proof_mask = None; - - for i in 0..chain_count_integer { - let chain_id = self - .get_chain_id_from_index_impl(i.into(), block_number) - .await - .unwrap(); - let chain_root = self - .get_chain_root_from_id_impl(chain_id, block_number) - .await - .unwrap(); - full_chain_merkle_tree.push(Self::chain_id_leaf_preimage(chain_root, 
chain_id)); - - if h256_to_u256(chain_id).as_u32() == searched_chain_id { - chain_id_leaf_proof_mask = Some(i as usize); - } - } - - let Some(chain_id_leaf_proof_mask) = chain_id_leaf_proof_mask else { - return Ok(None); - }; - - let chain_id_leaf_proof = full_chain_merkle_tree - .merkle_root_and_path(chain_id_leaf_proof_mask) - .1; - - println!( - "kl todo 2 {:?}, {:?}", - chain_id_leaf_proof_mask, - chain_id_leaf_proof.len() - ); - - let full_agg_root = full_chain_merkle_tree.merkle_root(); - println!( - "\n\n FULL AGG ROOT FOR BATCH = {:#?}\n\n", - hex::encode(full_agg_root.0) - ); - let mut chain_id_leaf_proof = full_chain_merkle_tree - .merkle_root_and_path(chain_id_leaf_proof_mask) - .1; - - chain_id_leaf_proof.push(local_msg_root); - let chain_id_leaf_proof_mask = - chain_id_leaf_proof_mask | (1 << (chain_id_leaf_proof.len() - 1)); - - Ok(Some(ChainAggProof { - chain_id_leaf_proof, - chain_id_leaf_proof_mask: chain_id_leaf_proof_mask.into(), - })) - } - - pub async fn get_chain_id_from_index_impl( - &self, - index: u32, - block_number: L2BlockNumber, - ) -> Result { - let mut storage = self.state.acquire_connection().await?; - - let chain_id_mapping_slot = U256::from(2); - let chain_id_index = H256::from_slice(&keccak256( - &[ - u256_to_h256(U256::from(index)).0, - u256_to_h256(chain_id_mapping_slot).to_fixed_bytes(), - ] - .concat(), - )); - println!("kl todo chain_id_index = {:#?}", chain_id_index); - - let storage_key = self.get_message_root_log_key(chain_id_index); - let chain_id = storage - .storage_web3_dal() - .get_historical_value_unchecked(storage_key.hashed_key(), block_number) - .await - .map_err(DalError::generalize)?; - println!("kl todo chain_id = {:#?}", chain_id); - Ok(chain_id) - } - - pub async fn get_chain_root_from_id_impl( - &self, - chain_id: H256, - block_number: L2BlockNumber, - ) -> Result { - let mut storage = self.state.acquire_connection().await?; - - let chain_tree_mapping_slot = U256::from(7); - let chain_tree_slot = 
H256::from_slice(&keccak256( - &[ - chain_id.0, - u256_to_h256(chain_tree_mapping_slot).to_fixed_bytes(), - ] - .concat(), - )); - println!("kl todo chain_tree_slot = {:#?}", chain_tree_slot); - - let chain_sides_slot = u256_to_h256(h256_to_u256(chain_tree_slot) + 1); - println!("kl todo chain_sides_slot = {:#?}", chain_sides_slot); - - let length_storage_key = self.get_message_root_log_key(chain_sides_slot); - let length_encoding = storage - .storage_web3_dal() - .get_historical_value_unchecked(length_storage_key.hashed_key(), block_number) - .await - .map_err(DalError::generalize)?; - - let length = h256_to_u256(length_encoding); - // Here we assume that length is non zero - assert!(length > U256::zero(), "Length is zero"); - let last_elem_pos = length - 1; - - let sides_data_start = H256(keccak256(chain_sides_slot.as_bytes())); - - let chain_root_slot = self - .get_message_root_log_key(u256_to_h256(h256_to_u256(sides_data_start) + last_elem_pos)); - println!("kl todo length_encoding = {:#?}", length_encoding); - println!("kl todo chain_root_slot = {:#?}", chain_root_slot); - let chain_root = storage - .storage_web3_dal() - .get_historical_value_unchecked(chain_root_slot.hashed_key(), block_number) - .await - .map_err(DalError::generalize)?; - println!("kl todo chain_root = {:#?}", chain_root); - Ok(chain_root) - } - - pub fn get_message_root_log_key(&self, key: H256) -> StorageKey { - let message_root = AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS); - StorageKey::new(message_root, key) - } - - pub fn batch_leaf_preimage(batch_root: H256, batch_number: u32) -> [u8; 96] { - let prefix = - hex::decode("d82fec4a37cbdc47f1e5cc4ad64deacf34a48e6f7c61fa5b68fd58e543259cf4") - .unwrap(); - let mut full_preimage = [0u8; 96]; - - full_preimage[0..32].copy_from_slice(&prefix); - full_preimage[32..64].copy_from_slice(&batch_root.0); - full_preimage[64..96].copy_from_slice(&u256_to_h256(batch_number.into()).0); - - full_preimage - } - - pub fn chain_id_leaf_preimage(chain_root: 
H256, chain_id: H256) -> [u8; 96] { - let prefix = - hex::decode("39bc69363bb9e26cf14240de4e22569e95cf175cfbcf1ade1a47a253b4bf7f61") - .unwrap(); - let mut full_preimage = [0u8; 96]; - - full_preimage[0..32].copy_from_slice(&prefix); - full_preimage[32..64].copy_from_slice(&chain_root.0); - full_preimage[64..96].copy_from_slice(&chain_id.0); - - full_preimage - } - pub async fn get_l1_batch_number_impl(&self) -> Result { let mut storage = self.state.acquire_connection().await?; let l1_batch_number = storage @@ -1184,71 +789,3 @@ impl ZksNamespace { }) } } - -struct TreeLeafProof { - leaf_proof: Vec, - batch_leaf_proof: Option, -} - -impl TreeLeafProof { - pub fn encode(self) -> Vec { - const SUPPORTED_METADATA_VERSION: u8 = 1; - - let log_leaf_proof_len = self.leaf_proof.len(); - - let (batch_leaf_proof_len, batch_leaf_proof) = if let Some(x) = self.batch_leaf_proof { - x.encode() - } else { - (0, vec![]) - }; - - assert!(log_leaf_proof_len < u8::MAX as usize); - assert!(batch_leaf_proof_len < u8::MAX as u32); - - let mut metadata = [0u8; 32]; - metadata[0] = SUPPORTED_METADATA_VERSION; - metadata[1] = log_leaf_proof_len as u8; - metadata[2] = batch_leaf_proof_len as u8; - - let mut result = vec![H256(metadata)]; - - result.extend(self.leaf_proof); - result.extend(batch_leaf_proof); - - result - } -} - -struct LogLeafProof { - agg_proofs: Vec, -} - -impl LogLeafProof { - pub fn new(leaf_proof: Vec) -> Self { - let bottom_layer = TreeLeafProof { - leaf_proof, - batch_leaf_proof: None, - }; - - Self { - agg_proofs: vec![bottom_layer], - } - } - - pub fn encode(self) -> Vec { - let mut result = vec![]; - for i in self.agg_proofs { - result.extend(i.encode()); - } - result - } - - pub fn append_aggregation_layer(&mut self, proof: LeafAggProof) { - let chain_id_leaf_proof = proof.chain_agg_proof.chain_id_leaf_proof.clone(); - self.agg_proofs.last_mut().unwrap().batch_leaf_proof = Some(proof); - self.agg_proofs.push(TreeLeafProof { - leaf_proof: chain_id_leaf_proof, - 
batch_leaf_proof: None, - }) - } -} diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index ff9f7af4a87..252519d704e 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -23,7 +23,11 @@ use zksync_types::{ api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, SLChainId, H256, U256, U64, }; -use zksync_web3_decl::{error::Web3Error, types::Filter}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::Web3Error, + types::Filter, +}; use super::{ backend_jsonrpsee::MethodTracer, @@ -263,6 +267,7 @@ pub(crate) struct RpcState { pub(super) mempool_cache: Option, pub(super) last_sealed_l2_block: SealedL2BlockNumber, pub(super) bridge_addresses_handle: BridgeAddressesHandle, + pub(super) l2_l1_log_proof_handler: Option>>, } impl RpcState { diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index 7acd163f78e..62014f92f27 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -19,6 +19,8 @@ zksync_system_constants.workspace = true zksync_eth_client.workspace = true zksync_shared_metrics.workspace = true zksync_mini_merkle_tree.workspace = true +zksync_utils.workspace = true +zksync_web3_decl.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true @@ -26,7 +28,10 @@ thiserror.workspace = true async-trait.workspace = true tracing.workspace = true async-recursion.workspace = true +itertools.workspace = true [dev-dependencies] zksync_concurrency.workspace = true test-log.workspace = true +hex.workspace = true +bincode.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index ac5fc86c6e9..65f805f3cf4 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,21 +1,27 @@ -use std::fmt; +use std::{fmt, sync::Arc}; use 
anyhow::Context; use zksync_contracts::{ - getters_facet_contract, state_transition_manager_contract, verifier_contract, + getters_facet_contract, l2_message_root, state_transition_manager_contract, verifier_contract, }; use zksync_eth_client::{ clients::{DynClient, L1}, CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, EthInterface, }; +use zksync_system_constants::L2_MESSAGE_ROOT_ADDRESS; use zksync_types::{ + api::{ChainAggProof, Log}, ethabi::Contract, - web3::{BlockId, BlockNumber, FilterBuilder, Log}, - Address, SLChainId, H256, U256, + web3::{BlockId, BlockNumber, Filter, FilterBuilder}, + Address, L1BatchNumber, L2ChainId, SLChainId, H256, U256, U64, +}; +use zksync_web3_decl::{ + client::{Network, L2}, + namespaces::{EthNamespaceClient, UnstableNamespaceClient, ZksNamespaceClient}, }; -/// L1 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. +/// Common L1 and L2 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. #[async_trait::async_trait] pub trait EthClient: 'static + fmt::Debug + Send + Sync { /// Returns events in a given block range. @@ -27,6 +33,10 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { topic2: Option, retries_left: usize, ) -> EnrichedClientResult>; + + /// Returns either finalized L1 block number or block number that satisfies `self.confirmations_for_eth_event` if it's set. + async fn confirmed_block_number(&self) -> EnrichedClientResult; + /// Returns finalized L1 block number. async fn finalized_block_number(&self) -> EnrichedClientResult; @@ -40,7 +50,17 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { packed_version: H256, ) -> EnrichedClientResult>>; + /// Returns ID of the chain. async fn chain_id(&self) -> EnrichedClientResult; + + /// Returns chain root for `l2_chain_id` at the moment right after `block_number`. + /// `block_number` is block number on SL. 
+ /// `l2_chain_id` is chain id of L2. + async fn get_chain_root( + &self, + block_number: U64, + l2_chain_id: L2ChainId, + ) -> Result; } pub const RETRY_LIMIT: usize = 5; @@ -50,10 +70,10 @@ const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; -/// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). +/// Implementation of [`EthClient`] based on HTTP JSON-RPC. #[derive(Debug, Clone)] -pub struct EthHttpQueryClient { - client: Box>, +pub struct EthHttpQueryClient { + client: Box>, diamond_proxy_addr: Address, governance_address: Address, new_upgrade_cut_data_signature: H256, @@ -62,12 +82,16 @@ pub struct EthHttpQueryClient { chain_admin_address: Option
, verifier_contract_abi: Contract, getters_facet_contract_abi: Contract, + message_root_abi: Contract, confirmations_for_eth_event: Option, } -impl EthHttpQueryClient { +impl EthHttpQueryClient +where + Box>: GetLogsClient, +{ pub fn new( - client: Box>, + client: Box>, diamond_proxy_addr: Address, state_transition_manager_address: Option
, chain_admin_address: Option
, @@ -92,6 +116,7 @@ impl EthHttpQueryClient { .signature(), verifier_contract_abi: verifier_contract(), getters_facet_contract_abi: getters_facet_contract(), + message_root_abi: l2_message_root(), confirmations_for_eth_event, } } @@ -102,6 +127,7 @@ impl EthHttpQueryClient { Some(self.governance_address), self.state_transition_manager_address, self.chain_admin_address, + Some(L2_MESSAGE_ROOT_ADDRESS), ] .into_iter() .flatten() @@ -126,7 +152,7 @@ impl EthHttpQueryClient { builder = builder.address(addresses); } let filter = builder.build(); - let mut result = self.client.logs(&filter).await; + let mut result = self.client.get_logs(filter).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. @@ -216,7 +242,10 @@ impl EthHttpQueryClient { } #[async_trait::async_trait] -impl EthClient for EthHttpQueryClient { +impl EthClient for EthHttpQueryClient +where + Box>: EthInterface + GetLogsClient, +{ async fn scheduler_vk_hash( &self, verifier_address: Address, @@ -274,27 +303,31 @@ impl EthClient for EthHttpQueryClient { .await } - async fn finalized_block_number(&self) -> EnrichedClientResult { + async fn confirmed_block_number(&self) -> EnrichedClientResult { if let Some(confirmations) = self.confirmations_for_eth_event { let latest_block_number = self.client.block_number().await?.as_u64(); Ok(latest_block_number.saturating_sub(confirmations)) } else { - let block = self - .client - .block(BlockId::Number(BlockNumber::Finalized)) - .await? 
- .ok_or_else(|| { - let err = ClientError::Custom("Finalized block must be present on L1".into()); - EnrichedClientError::new(err, "block") - })?; - let block_number = block.number.ok_or_else(|| { - let err = ClientError::Custom("Finalized block must contain number".into()); - EnrichedClientError::new(err, "block").with_arg("block", &block) - })?; - Ok(block_number.as_u64()) + self.finalized_block_number().await } } + async fn finalized_block_number(&self) -> EnrichedClientResult { + let block = self + .client + .block(BlockId::Number(BlockNumber::Finalized)) + .await? + .ok_or_else(|| { + let err = ClientError::Custom("Finalized block must be present on L1".into()); + EnrichedClientError::new(err, "block") + })?; + let block_number = block.number.ok_or_else(|| { + let err = ClientError::Custom("Finalized block must contain number".into()); + EnrichedClientError::new(err, "block").with_arg("block", &block) + })?; + Ok(block_number.as_u64()) + } + async fn get_total_priority_txs(&self) -> Result { CallFunctionArgs::new("getTotalPriorityTxs", ()) .for_contract(self.diamond_proxy_addr, &self.getters_facet_contract_abi) @@ -304,6 +337,157 @@ impl EthClient for EthHttpQueryClient { } async fn chain_id(&self) -> EnrichedClientResult { - Ok(self.client.fetch_chain_id().await?) + self.client.fetch_chain_id().await + } + + async fn get_chain_root( + &self, + block_number: U64, + l2_chain_id: L2ChainId, + ) -> Result { + CallFunctionArgs::new("getChainRoot", U256::from(l2_chain_id.0)) + .with_block(BlockId::Number(block_number.into())) + .for_contract(L2_MESSAGE_ROOT_ADDRESS, &self.message_root_abi) + .call(&self.client) + .await + } +} + +/// Encapsulates `eth_getLogs` calls. +#[async_trait::async_trait] +pub trait GetLogsClient: 'static + fmt::Debug + Send + Sync { + /// Returns L2 version of [`Log`] with L2-specific fields, e.g. `l1_batch_number`. + /// L1 clients fill such fields with `None`. 
+ async fn get_logs(&self, filter: Filter) -> EnrichedClientResult>; +} + +#[async_trait::async_trait] +impl GetLogsClient for Box> { + async fn get_logs(&self, filter: Filter) -> EnrichedClientResult> { + Ok(self + .logs(&filter) + .await? + .into_iter() + .map(Into::into) + .collect()) + } +} + +#[async_trait::async_trait] +impl GetLogsClient for Box> { + async fn get_logs(&self, filter: Filter) -> EnrichedClientResult> { + EthNamespaceClient::get_logs(self, filter.into()) + .await + .map_err(|err| EnrichedClientError::new(err, "eth_getLogs")) + } +} + +/// L2 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. +/// Trait extension for [`EthClient`]. +#[async_trait::async_trait] +pub trait L2EthClient: EthClient { + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> EnrichedClientResult>; + + async fn get_chain_root_l2( + &self, + l1_batch_number: L1BatchNumber, + l2_chain_id: L2ChainId, + ) -> Result, ContractCallError>; +} + +#[async_trait::async_trait] +impl L2EthClient for EthHttpQueryClient { + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + chain_id: L2ChainId, + ) -> EnrichedClientResult> { + self.client + .get_chain_log_proof(l1_batch_number, chain_id) + .await + .map_err(|err| EnrichedClientError::new(err, "unstable_getChainLogProof")) + } + + async fn get_chain_root_l2( + &self, + l1_batch_number: L1BatchNumber, + l2_chain_id: L2ChainId, + ) -> Result, ContractCallError> { + let l2_block_range = self + .client + .get_l2_block_range(l1_batch_number) + .await + .map_err(|err| EnrichedClientError::new(err, "zks_getL1BatchBlockRange"))?; + if let Some((_, l2_block_number)) = l2_block_range { + self.get_chain_root(l2_block_number, l2_chain_id) + .await + .map(Some) + } else { + Ok(None) + } + } +} + +/// Wrapper for L2 client object. 
+/// It is used for L2EthClient -> EthClient dyn upcasting coercion: +/// Arc -> L2EthClientW -> Arc +#[derive(Debug, Clone)] +pub struct L2EthClientW(pub Arc); + +#[async_trait::async_trait] +impl EthClient for L2EthClientW { + async fn get_events( + &self, + from: BlockNumber, + to: BlockNumber, + topic1: H256, + topic2: Option, + retries_left: usize, + ) -> EnrichedClientResult> { + self.0 + .get_events(from, to, topic1, topic2, retries_left) + .await + } + + async fn confirmed_block_number(&self) -> EnrichedClientResult { + self.0.confirmed_block_number().await + } + + async fn finalized_block_number(&self) -> EnrichedClientResult { + self.0.finalized_block_number().await + } + + async fn get_total_priority_txs(&self) -> Result { + self.0.get_total_priority_txs().await + } + + async fn scheduler_vk_hash( + &self, + verifier_address: Address, + ) -> Result { + self.0.scheduler_vk_hash(verifier_address).await + } + + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>> { + self.0.diamond_cut_by_version(packed_version).await + } + + async fn chain_id(&self) -> EnrichedClientResult { + self.0.chain_id().await + } + + async fn get_chain_root( + &self, + block_number: U64, + l2_chain_id: L2ChainId, + ) -> Result { + self.0.get_chain_root(block_number, l2_chain_id).await } } diff --git a/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs new file mode 100644 index 00000000000..581a1f6486c --- /dev/null +++ b/core/node/eth_watch/src/event_processors/appended_chain_batch_root.rs @@ -0,0 +1,237 @@ +use std::sync::Arc; + +use anyhow::Context; +use itertools::Itertools; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::{ + api::{ChainAggProof, Log}, + ethabi, + l2_to_l1_log::{ + BatchAndChainMerklePath, BATCH_LEAF_PADDING, 
LOG_PROOF_SUPPORTED_METADATA_VERSION, + }, + L1BatchNumber, L2ChainId, SLChainId, H256, U256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use crate::{ + client::L2EthClient, + event_processors::{EventProcessor, EventProcessorError, EventsSource}, +}; + +/// Responsible for `AppendedChainBatchRoot` events and saving `BatchAndChainMerklePath` for batches. +#[derive(Debug)] +pub struct BatchRootProcessor { + next_batch_number_lower_bound: L1BatchNumber, + appended_chain_batch_root_signature: H256, + merkle_tree: MiniMerkleTree<[u8; 96]>, + l2_chain_id: L2ChainId, + sl_l2_client: Arc, +} + +impl BatchRootProcessor { + pub fn new( + next_batch_number_lower_bound: L1BatchNumber, + merkle_tree: MiniMerkleTree<[u8; 96]>, + l2_chain_id: L2ChainId, + sl_l2_client: Arc, + ) -> Self { + Self { + next_batch_number_lower_bound, + appended_chain_batch_root_signature: ethabi::long_signature( + "AppendedChainBatchRoot", + &[ + ethabi::ParamType::Uint(256), + ethabi::ParamType::Uint(256), + ethabi::ParamType::FixedBytes(32), + ], + ), + merkle_tree, + l2_chain_id, + sl_l2_client, + } + } +} + +#[async_trait::async_trait] +impl EventProcessor for BatchRootProcessor { + async fn process_events( + &mut self, + storage: &mut Connection<'_, Core>, + events: Vec, + ) -> Result { + let events_count = events.len(); + let mut transaction = storage + .start_transaction() + .await + .map_err(DalError::generalize)?; + + let grouped_events: Vec<_> = events + .into_iter() + .map(|log| { + let sl_l1_batch_number = L1BatchNumber( + log.l1_batch_number + .expect("Missing L1 batch number for finalized event") + .as_u32(), + ); + let chain_l1_batch_number = L1BatchNumber(h256_to_u256(log.topics[2]).as_u32()); + let logs_root_hash = H256::from_slice(&log.data.0); + + (sl_l1_batch_number, chain_l1_batch_number, logs_root_hash) + }) + .group_by(|(sl_l1_batch_number, _, _)| *sl_l1_batch_number) + .into_iter() + .map(|(sl_l1_batch_number, group)| { + let group: Vec<_> = group + .into_iter() + 
.map(|(_, chain_l1_batch_number, logs_root_hash)| { + (chain_l1_batch_number, logs_root_hash) + }) + .collect(); + + (sl_l1_batch_number, group) + }) + .collect(); + + let next_batch_number_lower_bound = self.next_batch_number_lower_bound; + let new_events = grouped_events + .into_iter() + .skip_while(|(_sl_l1_batch_number, events)| { + let first_event = events.first().unwrap(); + let last_event = events.last().unwrap(); + + match ( + first_event.0 < next_batch_number_lower_bound, + last_event.0 < next_batch_number_lower_bound, + ) { + (true, true) => true, // skip + (false, false) => false, // do not skip + _ => { + panic!("batch range was partially processed"); + } + } + }); + + let sl_chain_id = self.sl_l2_client.chain_id().await?; + for (sl_l1_batch_number, chain_batches) in new_events { + let chain_agg_proof = self + .sl_l2_client + .get_chain_log_proof(sl_l1_batch_number, self.l2_chain_id) + .await? + .context("Missing chain log proof for finalized batch")?; + let chain_proof_vector = + Self::chain_proof_vector(sl_l1_batch_number, chain_agg_proof, sl_chain_id); + + for (batch_number, batch_root) in &chain_batches { + let root_from_db = transaction + .blocks_dal() + .get_l1_batch_l2_l1_merkle_root(*batch_number) + .await + .map_err(DalError::generalize)? 
+ .context("Missing l2_l1_merkle_root for finalized batch")?; + assert_eq!(root_from_db, *batch_root); + + self.merkle_tree + .push(Self::batch_leaf_preimage(*batch_root, *batch_number)); + self.next_batch_number_lower_bound = *batch_number + 1; + } + + let chain_root_local = self.merkle_tree.merkle_root(); + let chain_root_remote = self + .sl_l2_client + .get_chain_root_l2(sl_l1_batch_number, self.l2_chain_id) + .await?; + assert_eq!( + chain_root_local, + chain_root_remote.unwrap(), + "Chain root mismatch, l1 batch number #{sl_l1_batch_number}" + ); + + let number_of_leaves = self.merkle_tree.length(); + let batch_proofs = (0..chain_batches.len()).map(|i| { + let leaf_position = number_of_leaves - chain_batches.len() + i; + let batch_proof = self + .merkle_tree + .merkle_root_and_path_by_absolute_index(leaf_position) + .1; + let batch_proof_len = batch_proof.len() as u32; + let mut proof = vec![H256::from_low_u64_be(leaf_position as u64)]; + proof.extend(batch_proof); + proof.extend(chain_proof_vector.clone()); + + BatchAndChainMerklePath { + batch_proof_len, + proof, + } + }); + + for ((batch_number, _), proof) in chain_batches.iter().zip(batch_proofs) { + tracing::info!(%batch_number, "Saving batch-chain merkle path"); + transaction + .blocks_dal() + .set_batch_chain_merkle_path(*batch_number, proof) + .await + .map_err(DalError::generalize)?; + } + } + + transaction.commit().await.map_err(DalError::generalize)?; + + Ok(events_count) + } + + fn topic1(&self) -> H256 { + self.appended_chain_batch_root_signature + } + + fn topic2(&self) -> Option { + Some(H256::from_low_u64_be(self.l2_chain_id.0)) + } + + fn event_source(&self) -> EventsSource { + EventsSource::SL + } + + fn event_type(&self) -> EventType { + EventType::ChainBatchRoot + } + + fn only_finalized_block(&self) -> bool { + true + } +} + +impl BatchRootProcessor { + pub(crate) fn batch_leaf_preimage(batch_root: H256, batch_number: L1BatchNumber) -> [u8; 96] { + let mut full_preimage = [0u8; 96]; + + 
full_preimage[0..32].copy_from_slice(BATCH_LEAF_PADDING.as_bytes()); + full_preimage[32..64].copy_from_slice(batch_root.as_bytes()); + full_preimage[64..96] + .copy_from_slice(H256::from_low_u64_be(batch_number.0 as u64).as_bytes()); + + full_preimage + } + + fn chain_proof_vector( + sl_l1_batch_number: L1BatchNumber, + chain_agg_proof: ChainAggProof, + sl_chain_id: SLChainId, + ) -> Vec { + let sl_encoded_data = U256::from(sl_l1_batch_number.0) * U256::from(2).pow(128.into()) + + chain_agg_proof.chain_id_leaf_proof_mask; + + let mut metadata = [0u8; 32]; + metadata[0] = LOG_PROOF_SUPPORTED_METADATA_VERSION; + metadata[1] = chain_agg_proof.chain_id_leaf_proof.len() as u8; + + let mut chain_proof_vector = vec![ + u256_to_h256(sl_encoded_data), + H256::from_low_u64_be(sl_chain_id.0), + H256(metadata), + ]; + chain_proof_vector.extend(chain_agg_proof.chain_id_leaf_proof); + + chain_proof_vector + } +} diff --git a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs index aa43e7239f8..3f4b0f3cf5a 100644 --- a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs @@ -1,7 +1,9 @@ +use std::sync::Arc; + use anyhow::Context as _; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_types::{ - ethabi::Contract, protocol_version::ProtocolSemanticVersion, web3::Log, ProtocolUpgrade, H256, + api::Log, ethabi::Contract, protocol_version::ProtocolSemanticVersion, ProtocolUpgrade, H256, U256, }; @@ -17,12 +19,14 @@ pub struct DecentralizedUpgradesEventProcessor { /// Last protocol version seen. Used to skip events for already known upgrade proposals. 
last_seen_protocol_version: ProtocolSemanticVersion, update_upgrade_timestamp_signature: H256, + sl_client: Arc, } impl DecentralizedUpgradesEventProcessor { pub fn new( last_seen_protocol_version: ProtocolSemanticVersion, chain_admin_contract: &Contract, + sl_client: Arc, ) -> Self { Self { last_seen_protocol_version, @@ -31,6 +35,7 @@ impl DecentralizedUpgradesEventProcessor { .context("UpdateUpgradeTimestamp event is missing in ABI") .unwrap() .signature(), + sl_client, } } } @@ -40,7 +45,6 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result { let mut upgrades = Vec::new(); @@ -51,7 +55,8 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { .ok() .context("upgrade timestamp is too big")?; - let diamond_cut = sl_client + let diamond_cut = self + .sl_client .diamond_cut_by_version(version) .await? .context("missing upgrade data on STM")?; @@ -62,7 +67,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { }; // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(sl_client.scheduler_vk_hash(address).await?) + Some(self.sl_client.scheduler_vk_hash(address).await?) 
} else { None }; @@ -128,7 +133,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { Ok(events.len()) } - fn relevant_topic(&self) -> H256 { + fn topic1(&self) -> H256 { self.update_upgrade_timestamp_signature } diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index f145181b0cf..ddbf84e6593 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -2,16 +2,17 @@ use std::fmt; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core}; use zksync_eth_client::{ContractCallError, EnrichedClientError}; -use zksync_types::{web3::Log, H256}; +use zksync_types::{api::Log, H256}; pub(crate) use self::{ + appended_chain_batch_root::BatchRootProcessor, decentralized_upgrades::DecentralizedUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, }; -use crate::client::EthClient; +mod appended_chain_batch_root; mod decentralized_upgrades; -pub mod priority_ops; +mod priority_ops; /// Errors issued by an [`EventProcessor`]. #[derive(Debug, thiserror::Error)] @@ -50,19 +51,28 @@ impl EventProcessorError { /// feeds events to all processors one-by-one. #[async_trait::async_trait] pub(super) trait EventProcessor: 'static + fmt::Debug + Send + Sync { - /// Processes given events. All events are guaranteed to match [`Self::relevant_topic()`]. + /// Processes given events. All events are guaranteed to match [`Self::topic1()`] and [`Self::topic2()`]. /// Returns number of processed events, this result is used to update last processed block. 
async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result; - /// Relevant topic which defines what events to be processed - fn relevant_topic(&self) -> H256; + /// Relevant topic1 which defines what events to be processed + fn topic1(&self) -> H256; + + /// Relevant topic2 which defines what events to be processed + fn topic2(&self) -> Option { + None + } fn event_source(&self) -> EventsSource; fn event_type(&self) -> EventType; + + /// Whether processor expect events only from finalized blocks. + fn only_finalized_block(&self) -> bool { + false + } } diff --git a/core/node/eth_watch/src/event_processors/priority_ops.rs b/core/node/eth_watch/src/event_processors/priority_ops.rs index 051c076850e..cbb224da639 100644 --- a/core/node/eth_watch/src/event_processors/priority_ops.rs +++ b/core/node/eth_watch/src/event_processors/priority_ops.rs @@ -1,10 +1,10 @@ -use std::convert::TryFrom; +use std::{convert::TryFrom, sync::Arc}; use anyhow::Context; use zksync_contracts::hyperchain_contract; use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_shared_metrics::{TxStage, APP_METRICS}; -use zksync_types::{l1::L1Tx, web3::Log, PriorityOpId, H256}; +use zksync_types::{api::Log, l1::L1Tx, PriorityOpId, H256}; use crate::{ client::EthClient, @@ -17,16 +17,21 @@ use crate::{ pub struct PriorityOpsEventProcessor { next_expected_priority_id: PriorityOpId, new_priority_request_signature: H256, + sl_client: Arc, } impl PriorityOpsEventProcessor { - pub fn new(next_expected_priority_id: PriorityOpId) -> anyhow::Result { + pub fn new( + next_expected_priority_id: PriorityOpId, + sl_client: Arc, + ) -> anyhow::Result { Ok(Self { next_expected_priority_id, new_priority_request_signature: hyperchain_contract() .event("NewPriorityRequest") .context("NewPriorityRequest event is missing in ABI")? 
.signature(), + sl_client, }) } } @@ -36,14 +41,13 @@ impl EventProcessor for PriorityOpsEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - sl_client: &dyn EthClient, events: Vec, ) -> Result { let mut priority_ops = Vec::new(); let events_count = events.len(); for event in events { assert_eq!(event.topics[0], self.new_priority_request_signature); // guaranteed by the watcher - let tx = L1Tx::try_from(event) + let tx = L1Tx::try_from(Into::::into(event)) .map_err(|err| EventProcessorError::log_parse(err, "priority op"))?; priority_ops.push(tx); } @@ -84,7 +88,7 @@ impl EventProcessor for PriorityOpsEventProcessor { let stage_latency = METRICS.poll_eth_node[&PollStage::PersistL1Txs].start(); APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc(); APP_METRICS.processed_l1_txs[&TxStage::added_to_mempool()].inc(); - let processed_priority_transactions = sl_client.get_total_priority_txs().await?; + let processed_priority_transactions = self.sl_client.get_total_priority_txs().await?; let ops_to_insert: Vec<&L1Tx> = new_ops .iter() .take_while(|op| processed_priority_transactions > op.serial_id().0) @@ -105,7 +109,7 @@ impl EventProcessor for PriorityOpsEventProcessor { Ok(skipped_ops + ops_to_insert.len()) } - fn relevant_topic(&self) -> H256 { + fn topic1(&self) -> H256 { self.new_priority_request_signature } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index 56411c9b1ee..908ff4da37f 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -2,24 +2,27 @@ //! protocol upgrades etc. //! New events are accepted to the ZKsync network once they have the sufficient amount of L1 confirmations. 
-use std::time::Duration; +use std::{sync::Arc, time::Duration}; use anyhow::Context as _; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; +use zksync_mini_merkle_tree::MiniMerkleTree; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ ethabi::Contract, protocol_version::ProtocolSemanticVersion, - web3::BlockNumber as Web3BlockNumber, PriorityOpId, + web3::BlockNumber as Web3BlockNumber, L1BatchNumber, L2ChainId, PriorityOpId, }; -pub use self::client::EthHttpQueryClient; +pub use self::client::{EthClient, EthHttpQueryClient, L2EthClient}; use self::{ - client::{EthClient, RETRY_LIMIT}, + client::{L2EthClientW, RETRY_LIMIT}, event_processors::{EventProcessor, EventProcessorError, PriorityOpsEventProcessor}, metrics::METRICS, }; -use crate::event_processors::{DecentralizedUpgradesEventProcessor, EventsSource}; +use crate::event_processors::{ + BatchRootProcessor, DecentralizedUpgradesEventProcessor, EventsSource, +}; mod client; mod event_processors; @@ -31,13 +34,15 @@ mod tests; struct EthWatchState { last_seen_protocol_version: ProtocolSemanticVersion, next_expected_priority_id: PriorityOpId, + chain_batch_root_number_lower_bound: L1BatchNumber, + batch_merkle_tree: MiniMerkleTree<[u8; 96]>, } /// Ethereum watcher component. 
#[derive(Debug)] pub struct EthWatch { - l1_client: Box, - sl_client: Box, + l1_client: Arc, + sl_client: Arc, poll_interval: Duration, event_processors: Vec>, pool: ConnectionPool, @@ -48,26 +53,44 @@ impl EthWatch { pub async fn new( chain_admin_contract: &Contract, l1_client: Box, - sl_client: Box, + sl_l2_client: Option>, pool: ConnectionPool, poll_interval: Duration, + chain_id: L2ChainId, ) -> anyhow::Result { let mut storage = pool.connection_tagged("eth_watch").await?; - let state = Self::initialize_state(&mut storage).await?; + let l1_client: Arc = l1_client.into(); + let sl_l2_client: Option> = sl_l2_client.map(Into::into); + let sl_client: Arc = if let Some(sl_l2_client) = sl_l2_client.clone() { + Arc::new(L2EthClientW(sl_l2_client)) + } else { + l1_client.clone() + }; + + let state = Self::initialize_state(&mut storage, sl_client.as_ref()).await?; tracing::info!("initialized state: {state:?}"); drop(storage); let priority_ops_processor = - PriorityOpsEventProcessor::new(state.next_expected_priority_id)?; + PriorityOpsEventProcessor::new(state.next_expected_priority_id, sl_client.clone())?; let decentralized_upgrades_processor = DecentralizedUpgradesEventProcessor::new( state.last_seen_protocol_version, chain_admin_contract, + sl_client.clone(), ); - let event_processors: Vec> = vec![ + let mut event_processors: Vec> = vec![ Box::new(priority_ops_processor), Box::new(decentralized_upgrades_processor), ]; - + if let Some(sl_l2_client) = sl_l2_client { + let batch_root_processor = BatchRootProcessor::new( + state.chain_batch_root_number_lower_bound, + state.batch_merkle_tree, + chain_id, + sl_l2_client, + ); + event_processors.push(Box::new(batch_root_processor)); + } Ok(Self { l1_client, sl_client, @@ -78,7 +101,10 @@ impl EthWatch { } #[tracing::instrument(name = "EthWatch::initialize_state", skip_all)] - async fn initialize_state(storage: &mut Connection<'_, Core>) -> anyhow::Result { + async fn initialize_state( + storage: &mut Connection<'_, Core>, + 
sl_client: &dyn EthClient, + ) -> anyhow::Result { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() .last_priority_id() @@ -91,9 +117,26 @@ impl EthWatch { .await? .context("expected at least one (genesis) version to be present in DB")?; + let sl_chain_id = sl_client.chain_id().await?; + let batch_hashes = storage + .blocks_dal() + .get_executed_batch_roots_on_sl(sl_chain_id) + .await?; + + let chain_batch_root_number_lower_bound = batch_hashes + .last() + .map(|(n, _)| *n + 1) + .unwrap_or(L1BatchNumber(0)); + let tree_leaves = batch_hashes.into_iter().map(|(batch_number, batch_root)| { + BatchRootProcessor::batch_leaf_preimage(batch_root, batch_number) + }); + let batch_merkle_tree = MiniMerkleTree::new(tree_leaves, None); + Ok(EthWatchState { next_expected_priority_id, last_seen_protocol_version, + chain_batch_root_number_lower_bound, + batch_merkle_tree, }) } @@ -138,37 +181,42 @@ impl EthWatch { EventsSource::SL => self.sl_client.as_ref(), }; let chain_id = client.chain_id().await?; - let finalized_block = client.finalized_block_number().await?; + let to_block = if processor.only_finalized_block() { + client.finalized_block_number().await? + } else { + client.confirmed_block_number().await? 
+ }; let from_block = storage .eth_watcher_dal() .get_or_set_next_block_to_process( processor.event_type(), chain_id, - finalized_block.saturating_sub(PRIORITY_EXPIRATION), + to_block.saturating_sub(PRIORITY_EXPIRATION), ) .await .map_err(DalError::generalize)?; // There are no new blocks so there is nothing to be done - if from_block > finalized_block { + if from_block > to_block { continue; } + let processor_events = client .get_events( Web3BlockNumber::Number(from_block.into()), - Web3BlockNumber::Number(finalized_block.into()), - processor.relevant_topic(), - None, + Web3BlockNumber::Number(to_block.into()), + processor.topic1(), + processor.topic2(), RETRY_LIMIT, ) .await?; let processed_events_count = processor - .process_events(storage, &*self.sl_client, processor_events.clone()) + .process_events(storage, processor_events.clone()) .await?; let next_block_to_process = if processed_events_count == processor_events.len() { - finalized_block + 1 + to_block + 1 } else if processed_events_count == 0 { //nothing was processed from_block diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs deleted file mode 100644 index 1dc72dca3c2..00000000000 --- a/core/node/eth_watch/src/tests.rs +++ /dev/null @@ -1,788 +0,0 @@ -use std::{collections::HashMap, convert::TryInto, sync::Arc}; - -use tokio::sync::RwLock; -use zksync_contracts::{ - chain_admin_contract, hyperchain_contract, state_transition_manager_contract, -}; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_eth_client::{ContractCallError, EnrichedClientResult}; -use zksync_types::{ - abi, - abi::ProposedUpgrade, - ethabi, - ethabi::Token, - l1::{L1Tx, OpProcessingType, PriorityQueueType}, - protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, - protocol_version::ProtocolSemanticVersion, - web3::{contract::Tokenizable, BlockNumber, Log}, - Address, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, - ProtocolVersionId, 
SLChainId, Transaction, H256, U256, U64, -}; - -use crate::{ - client::{EthClient, RETRY_LIMIT}, - EthWatch, -}; - -#[derive(Debug)] -struct FakeEthClientData { - transactions: HashMap>, - diamond_upgrades: HashMap>, - upgrade_timestamp: HashMap>, - last_finalized_block_number: u64, - chain_id: SLChainId, - processed_priority_transactions_count: u64, -} - -impl FakeEthClientData { - fn new(chain_id: SLChainId) -> Self { - Self { - transactions: Default::default(), - diamond_upgrades: Default::default(), - upgrade_timestamp: Default::default(), - last_finalized_block_number: 0, - chain_id, - processed_priority_transactions_count: 0, - } - } - - fn add_transactions(&mut self, transactions: &[L1Tx]) { - for transaction in transactions { - let eth_block = transaction.eth_block(); - self.transactions - .entry(eth_block.0 as u64) - .or_default() - .push(tx_into_log(transaction.clone())); - self.processed_priority_transactions_count += 1; - } - } - - fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - for (upgrade, eth_block) in upgrades { - self.upgrade_timestamp - .entry(*eth_block) - .or_default() - .push(upgrade_timestamp_log(*eth_block)); - self.diamond_upgrades - .entry(*eth_block) - .or_default() - .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); - } - } - - fn set_last_finalized_block_number(&mut self, number: u64) { - self.last_finalized_block_number = number; - } - - fn set_processed_priority_transactions_count(&mut self, number: u64) { - self.processed_priority_transactions_count = number; - } -} - -#[derive(Debug, Clone)] -struct MockEthClient { - inner: Arc>, -} - -impl MockEthClient { - fn new(chain_id: SLChainId) -> Self { - Self { - inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), - } - } - - async fn add_transactions(&mut self, transactions: &[L1Tx]) { - self.inner.write().await.add_transactions(transactions); - } - - async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - 
self.inner.write().await.add_upgrade_timestamp(upgrades); - } - - async fn set_last_finalized_block_number(&mut self, number: u64) { - self.inner - .write() - .await - .set_last_finalized_block_number(number); - } - - async fn set_processed_priority_transactions_count(&mut self, number: u64) { - self.inner - .write() - .await - .set_processed_priority_transactions_count(number) - } - - async fn block_to_number(&self, block: BlockNumber) -> u64 { - match block { - BlockNumber::Earliest => 0, - BlockNumber::Number(number) => number.as_u64(), - BlockNumber::Pending - | BlockNumber::Latest - | BlockNumber::Finalized - | BlockNumber::Safe => unreachable!(), - } - } -} - -#[async_trait::async_trait] -impl EthClient for MockEthClient { - async fn get_events( - &self, - from: BlockNumber, - to: BlockNumber, - topic1: H256, - topic2: Option, - _retries_left: usize, - ) -> EnrichedClientResult> { - let from = self.block_to_number(from).await; - let to = self.block_to_number(to).await; - let mut logs = vec![]; - for number in from..=to { - if let Some(ops) = self.inner.read().await.transactions.get(&number) { - logs.extend_from_slice(ops); - } - if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { - logs.extend_from_slice(ops); - } - if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { - logs.extend_from_slice(ops); - } - } - Ok(logs - .into_iter() - .filter(|log| { - log.topics.first() == Some(&topic1) - && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) - }) - .collect()) - } - - async fn scheduler_vk_hash( - &self, - _verifier_address: Address, - ) -> Result { - Ok(H256::zero()) - } - - async fn finalized_block_number(&self) -> EnrichedClientResult { - Ok(self.inner.read().await.last_finalized_block_number) - } - - async fn diamond_cut_by_version( - &self, - packed_version: H256, - ) -> EnrichedClientResult>> { - let from_block = *self - .inner - .read() - .await - .diamond_upgrades - .keys() - .min() - 
.unwrap_or(&0); - let to_block = *self - .inner - .read() - .await - .diamond_upgrades - .keys() - .max() - .unwrap_or(&0); - - let logs = self - .get_events( - U64::from(from_block).into(), - U64::from(to_block).into(), - state_transition_manager_contract() - .event("NewUpgradeCutData") - .unwrap() - .signature(), - Some(packed_version), - RETRY_LIMIT, - ) - .await?; - - Ok(logs.into_iter().next().map(|log| log.data.0)) - } - - async fn get_total_priority_txs(&self) -> Result { - Ok(self - .inner - .read() - .await - .processed_priority_transactions_count) - } - - async fn chain_id(&self) -> EnrichedClientResult { - Ok(self.inner.read().await.chain_id) - } -} - -fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { - let tx = L1Tx { - execute: Execute { - contract_address: Some(Address::repeat_byte(0x11)), - calldata: vec![1, 2, 3], - factory_deps: vec![], - value: U256::zero(), - }, - common_data: L1TxCommonData { - serial_id: PriorityOpId(serial_id), - sender: [1u8; 20].into(), - eth_block, - gas_limit: Default::default(), - max_fee_per_gas: Default::default(), - gas_per_pubdata_limit: 1u32.into(), - full_fee: Default::default(), - layer_2_tip_fee: U256::from(10u8), - refund_recipient: Address::zero(), - to_mint: Default::default(), - priority_queue_type: PriorityQueueType::Deque, - op_processing_type: OpProcessingType::Common, - canonical_tx_hash: H256::default(), - }, - received_timestamp_ms: 0, - }; - // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- let tx = Transaction::from_abi( - abi::Transaction::try_from(Transaction::from(tx)).unwrap(), - false, - ) - .unwrap(); - tx.try_into().unwrap() -} - -fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { - let tx = ProtocolUpgradeTx { - execute: Execute { - contract_address: Some(Address::repeat_byte(0x11)), - calldata: vec![1, 2, 3], - factory_deps: vec![], - value: U256::zero(), - }, - common_data: ProtocolUpgradeTxCommonData { - upgrade_id: id, - sender: [1u8; 20].into(), - eth_block, - gas_limit: Default::default(), - max_fee_per_gas: Default::default(), - gas_per_pubdata_limit: 1u32.into(), - refund_recipient: Address::zero(), - to_mint: Default::default(), - canonical_tx_hash: H256::zero(), - }, - received_timestamp_ms: 0, - }; - // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - Transaction::from_abi( - abi::Transaction::try_from(Transaction::from(tx)).unwrap(), - false, - ) - .unwrap() - .try_into() - .unwrap() -} - -async fn create_test_watcher( - connection_pool: ConnectionPool, - is_gateway: bool, -) -> (EthWatch, MockEthClient, MockEthClient) { - let l1_client = MockEthClient::new(SLChainId(42)); - let sl_client = if is_gateway { - MockEthClient::new(SLChainId(123)) - } else { - l1_client.clone() - }; - let watcher = EthWatch::new( - &chain_admin_contract(), - Box::new(l1_client.clone()), - Box::new(sl_client.clone()), - connection_pool, - std::time::Duration::from_nanos(1), - ) - .await - .unwrap(); - - (watcher, l1_client, sl_client) -} - -async fn create_l1_test_watcher( - connection_pool: ConnectionPool, -) -> (EthWatch, MockEthClient) { - let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; - (watcher, l1_client) -} - -async fn create_gateway_test_watcher( - connection_pool: ConnectionPool, -) -> (EthWatch, MockEthClient, MockEthClient) { - create_test_watcher(connection_pool, true).await -} - -#[test_log::test(tokio::test)] -async fn 
test_normal_operation_l1_txs() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)]) - .await; - client.set_last_finalized_block_number(15).await; - // second tx will not be processed, as it's block is not finalized yet. - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_txs = get_all_db_txs(&mut storage).await; - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - assert_eq!(db_txs.len(), 2); - let db_tx = db_txs[0].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 0); - let db_tx = db_txs[1].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 1); - - client.set_last_finalized_block_number(20).await; - // now the second tx will be processed - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_txs = get_all_db_txs(&mut storage).await; - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - assert_eq!(db_txs.len(), 3); - let db_tx = db_txs[2].clone(); - assert_eq!(db_tx.common_data.serial_id.0, 2); -} - -#[test_log::test(tokio::test)] -async fn test_gap_in_upgrade_timestamp() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_upgrade_timestamp(&[( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }, - tx: None, - ..Default::default() - }, - 10, - )]) - .await; - 
client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_versions = storage.protocol_versions_dal().all_versions().await; - // there should be genesis version and just added version - assert_eq!(db_versions.len(), 2); - - let previous_version = (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(); - let next_version = ProtocolVersionId::next(); - assert_eq!(db_versions[0].minor, previous_version); - assert_eq!(db_versions[1].minor, next_version); -} - -#[test_log::test(tokio::test)] -async fn test_normal_operation_upgrade_timestamp() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - - let mut client = MockEthClient::new(SLChainId(42)); - let mut watcher = EthWatch::new( - &chain_admin_contract(), - Box::new(client.clone()), - Box::new(client.clone()), - connection_pool.clone(), - std::time::Duration::from_nanos(1), - ) - .await - .unwrap(); - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_upgrade_timestamp(&[ - ( - ProtocolUpgrade { - tx: None, - ..Default::default() - }, - 10, - ), - ( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }, - tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), - ..Default::default() - }, - 18, - ), - ( - ProtocolUpgrade { - version: ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 1.into(), - }, - tx: None, - ..Default::default() - }, - 19, - ), - ]) - .await; - client.set_last_finalized_block_number(15).await; - // The second upgrade will not be processed, as it has less than 5 confirmations. - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_versions = storage.protocol_versions_dal().all_versions().await; - // There should be genesis version and just added version. 
- assert_eq!(db_versions.len(), 2); - assert_eq!(db_versions[1].minor, ProtocolVersionId::latest()); - - client.set_last_finalized_block_number(20).await; - // Now the second and the third upgrades will be processed. - watcher.loop_iteration(&mut storage).await.unwrap(); - let db_versions = storage.protocol_versions_dal().all_versions().await; - let mut expected_version = ProtocolSemanticVersion { - minor: ProtocolVersionId::next(), - patch: 0.into(), - }; - assert_eq!(db_versions.len(), 4); - assert_eq!(db_versions[2], expected_version); - expected_version.patch += 1; - assert_eq!(db_versions[3], expected_version); - - // Check that tx was saved with the second upgrade. - let tx = storage - .protocol_versions_dal() - .get_protocol_upgrade_tx(ProtocolVersionId::next()) - .await - .unwrap() - .expect("no protocol upgrade transaction"); - assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); -} - -#[test_log::test(tokio::test)] -#[should_panic] -async fn test_gap_in_single_batch() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - build_l1_tx(3, 14), - build_l1_tx(5, 14), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); -} - -#[test_log::test(tokio::test)] -#[should_panic] -async fn test_gap_between_batches() { - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - // this goes to the first batch - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 
14), - // this goes to the second batch - build_l1_tx(4, 20), - build_l1_tx(5, 22), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 3); - client.set_last_finalized_block_number(25).await; - watcher.loop_iteration(&mut storage).await.unwrap(); -} - -#[test_log::test(tokio::test)] -async fn test_overlapping_batches() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - client - .add_transactions(&[ - // this goes to the first batch - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - // this goes to the second batch - build_l1_tx(1, 20), - build_l1_tx(2, 22), - build_l1_tx(3, 23), - build_l1_tx(4, 23), - ]) - .await; - client.set_last_finalized_block_number(15).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 3); - - client.set_last_finalized_block_number(25).await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 5); - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - let tx = db_txs[2].clone(); - assert_eq!(tx.common_data.serial_id.0, 2); - let tx = db_txs[4].clone(); - assert_eq!(tx.common_data.serial_id.0, 4); -} - -#[test_log::test(tokio::test)] -async fn test_transactions_get_gradually_processed_by_gateway() { - zksync_concurrency::testonly::abort_on_panic(); - let connection_pool = ConnectionPool::::test_pool().await; - setup_db(&connection_pool).await; - let (mut watcher, 
mut l1_client, mut gateway_client) = - create_gateway_test_watcher(connection_pool.clone()).await; - - let mut storage = connection_pool.connection().await.unwrap(); - l1_client - .add_transactions(&[ - build_l1_tx(0, 10), - build_l1_tx(1, 14), - build_l1_tx(2, 14), - build_l1_tx(3, 20), - build_l1_tx(4, 22), - ]) - .await; - l1_client.set_last_finalized_block_number(15).await; - gateway_client - .set_processed_priority_transactions_count(2) - .await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 2); - - l1_client.set_last_finalized_block_number(25).await; - gateway_client - .set_processed_priority_transactions_count(4) - .await; - watcher.loop_iteration(&mut storage).await.unwrap(); - - let db_txs = get_all_db_txs(&mut storage).await; - assert_eq!(db_txs.len(), 4); - let mut db_txs: Vec = db_txs - .into_iter() - .map(|tx| tx.try_into().unwrap()) - .collect(); - db_txs.sort_by_key(|tx| tx.common_data.serial_id); - let tx = db_txs[2].clone(); - assert_eq!(tx.common_data.serial_id.0, 2); - let tx = db_txs[3].clone(); - assert_eq!(tx.common_data.serial_id.0, 3); -} - -async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { - storage.transactions_dal().reset_mempool().await.unwrap(); - storage - .transactions_dal() - .sync_mempool(&[], &[], 0, 0, 1000) - .await - .unwrap() -} - -fn tx_into_log(tx: L1Tx) -> Log { - let tx = abi::Transaction::try_from(Transaction::from(tx)).unwrap(); - let abi::Transaction::L1 { - tx, - factory_deps, - eth_block, - .. 
- } = tx - else { - unreachable!() - }; - - let data = ethabi::encode( - &abi::NewPriorityRequest { - tx_id: tx.nonce, - tx_hash: tx.hash().into(), - expiration_timestamp: u64::MAX, - transaction: tx, - factory_deps, - } - .encode(), - ); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![hyperchain_contract() - .event("NewPriorityRequest") - .expect("NewPriorityRequest event is missing in abi") - .signature()], - data: data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::default()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} - -fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { - let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); - - let encoded_params = ethabi::encode(&[upgrade_token]); - - let execute_upgrade_selector = hyperchain_contract() - .function("executeUpgrade") - .unwrap() - .short_signature(); - - // Concatenate the function selector with the encoded parameters - let mut calldata = Vec::with_capacity(4 + encoded_params.len()); - calldata.extend_from_slice(&execute_upgrade_selector); - calldata.extend_from_slice(&encoded_params); - - calldata -} - -fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { - // struct DiamondCutData { - // FacetCut[] facetCuts; - // address initAddress; - // bytes initCalldata; - // } - let final_data = ethabi::encode(&[Token::Tuple(vec![ - Token::Array(vec![]), - Token::Address(Address::zero()), - Token::Bytes(init_calldata(upgrade.clone())), - ])]); - tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![ - state_transition_manager_contract() - .event("NewUpgradeCutData") - .unwrap() - .signature(), - H256::from_low_u64_be(eth_block), - ], - data: final_data.into(), - block_hash: 
Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} -fn upgrade_timestamp_log(eth_block: u64) -> Log { - let final_data = ethabi::encode(&[U256::from(12345).into_token()]); - - Log { - address: Address::repeat_byte(0x1), - topics: vec![ - chain_admin_contract() - .event("UpdateUpgradeTimestamp") - .expect("UpdateUpgradeTimestamp event is missing in ABI") - .signature(), - H256::from_low_u64_be(eth_block), - ], - data: final_data.into(), - block_hash: Some(H256::repeat_byte(0x11)), - block_number: Some(eth_block.into()), - transaction_hash: Some(H256::random()), - transaction_index: Some(0u64.into()), - log_index: Some(0u64.into()), - transaction_log_index: Some(0u64.into()), - log_type: None, - removed: None, - block_timestamp: None, - } -} - -fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { - let abi::Transaction::L1 { - tx, factory_deps, .. 
- } = upgrade - .tx - .map(|tx| Transaction::from(tx).try_into().unwrap()) - .unwrap_or(abi::Transaction::L1 { - tx: Default::default(), - factory_deps: vec![], - eth_block: 0, - }) - else { - unreachable!() - }; - ProposedUpgrade { - l2_protocol_upgrade_tx: tx, - factory_deps, - bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), - default_account_hash: upgrade.default_account_code_hash.unwrap_or_default().into(), - verifier: upgrade.verifier_address.unwrap_or_default(), - verifier_params: upgrade.verifier_params.unwrap_or_default().into(), - l1_contracts_upgrade_calldata: vec![], - post_upgrade_calldata: vec![], - upgrade_timestamp: upgrade.timestamp.into(), - new_protocol_version: upgrade.version.pack(), - } - .encode() -} - -async fn setup_db(connection_pool: &ConnectionPool) { - connection_pool - .connection() - .await - .unwrap() - .protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion { - version: ProtocolSemanticVersion { - minor: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), - patch: 0.into(), - }, - ..Default::default() - }) - .await - .unwrap(); -} diff --git a/core/node/eth_watch/src/tests/client.rs b/core/node/eth_watch/src/tests/client.rs new file mode 100644 index 00000000000..dbf9ca6f984 --- /dev/null +++ b/core/node/eth_watch/src/tests/client.rs @@ -0,0 +1,487 @@ +use std::{collections::HashMap, convert::TryInto, sync::Arc}; + +use tokio::sync::RwLock; +use zksync_contracts::{ + chain_admin_contract, hyperchain_contract, state_transition_manager_contract, +}; +use zksync_eth_client::{ContractCallError, EnrichedClientResult}; +use zksync_types::{ + abi, + abi::ProposedUpgrade, + api::{ChainAggProof, Log}, + ethabi, + ethabi::Token, + l1::L1Tx, + web3::{contract::Tokenizable, BlockNumber}, + Address, L1BatchNumber, L2ChainId, ProtocolUpgrade, SLChainId, Transaction, H256, U256, U64, +}; +use zksync_utils::u256_to_h256; + +use crate::client::{EthClient, L2EthClient, RETRY_LIMIT}; + 
+#[derive(Debug)] +pub struct FakeEthClientData { + transactions: HashMap>, + diamond_upgrades: HashMap>, + upgrade_timestamp: HashMap>, + last_finalized_block_number: u64, + chain_id: SLChainId, + processed_priority_transactions_count: u64, + chain_log_proofs: HashMap, + batch_roots: HashMap>, + chain_roots: HashMap, +} + +impl FakeEthClientData { + fn new(chain_id: SLChainId) -> Self { + Self { + transactions: Default::default(), + diamond_upgrades: Default::default(), + upgrade_timestamp: Default::default(), + last_finalized_block_number: 0, + chain_id, + processed_priority_transactions_count: 0, + chain_log_proofs: Default::default(), + batch_roots: Default::default(), + chain_roots: Default::default(), + } + } + + fn add_transactions(&mut self, transactions: &[L1Tx]) { + for transaction in transactions { + let eth_block = transaction.eth_block(); + self.transactions + .entry(eth_block.0 as u64) + .or_default() + .push(tx_into_log(transaction.clone())); + self.processed_priority_transactions_count += 1; + } + } + + fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + for (upgrade, eth_block) in upgrades { + self.upgrade_timestamp + .entry(*eth_block) + .or_default() + .push(upgrade_timestamp_log(*eth_block)); + self.diamond_upgrades + .entry(*eth_block) + .or_default() + .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); + } + } + + fn set_last_finalized_block_number(&mut self, number: u64) { + self.last_finalized_block_number = number; + } + + fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.processed_priority_transactions_count = number; + } + + fn add_batch_roots(&mut self, batch_roots: &[(u64, u64, H256)]) { + for (sl_block, l2_batch_number, batch_root) in batch_roots { + self.batch_roots + .entry(*sl_block) + .or_default() + .push(batch_root_to_log(*sl_block, *l2_batch_number, *batch_root)); + } + } + + fn add_chain_roots(&mut self, chain_roots: &[(u64, H256)]) { + for (batch, root) in chain_roots 
{ + self.chain_roots.insert(*batch, *root); + } + } + + fn add_chain_log_proofs(&mut self, chain_log_proofs: Vec<(L1BatchNumber, ChainAggProof)>) { + for (batch, proof) in chain_log_proofs { + self.chain_log_proofs.insert(batch, proof); + } + } +} + +#[derive(Debug, Clone)] +pub struct MockEthClient { + inner: Arc>, +} + +impl MockEthClient { + pub fn new(chain_id: SLChainId) -> Self { + Self { + inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), + } + } + + pub async fn add_transactions(&mut self, transactions: &[L1Tx]) { + self.inner.write().await.add_transactions(transactions); + } + + pub async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_upgrade_timestamp(upgrades); + } + + pub async fn set_last_finalized_block_number(&mut self, number: u64) { + self.inner + .write() + .await + .set_last_finalized_block_number(number); + } + + pub async fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.inner + .write() + .await + .set_processed_priority_transactions_count(number) + } + + pub async fn block_to_number(&self, block: BlockNumber) -> u64 { + match block { + BlockNumber::Earliest => 0, + BlockNumber::Number(number) => number.as_u64(), + BlockNumber::Pending + | BlockNumber::Latest + | BlockNumber::Finalized + | BlockNumber::Safe => unreachable!(), + } + } + + pub async fn add_batch_roots(&mut self, batch_roots: &[(u64, u64, H256)]) { + self.inner.write().await.add_batch_roots(batch_roots); + } + + pub async fn add_chain_roots(&mut self, chain_roots: &[(u64, H256)]) { + self.inner.write().await.add_chain_roots(chain_roots); + } + + pub async fn add_chain_log_proofs( + &mut self, + chain_log_proofs: Vec<(L1BatchNumber, ChainAggProof)>, + ) { + self.inner + .write() + .await + .add_chain_log_proofs(chain_log_proofs); + } +} + +#[async_trait::async_trait] +impl EthClient for MockEthClient { + async fn get_events( + &self, + from: BlockNumber, + to: BlockNumber, + 
topic1: H256, + topic2: Option, + _retries_left: usize, + ) -> EnrichedClientResult> { + let from = self.block_to_number(from).await; + let to = self.block_to_number(to).await; + let mut logs = vec![]; + for number in from..=to { + if let Some(ops) = self.inner.read().await.transactions.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { + logs.extend_from_slice(ops); + } + if let Some(ops) = self.inner.read().await.batch_roots.get(&number) { + logs.extend_from_slice(ops); + } + } + Ok(logs + .into_iter() + .filter(|log| { + log.topics.first() == Some(&topic1) + && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) + }) + .collect()) + } + + async fn scheduler_vk_hash( + &self, + _verifier_address: Address, + ) -> Result { + Ok(H256::zero()) + } + + async fn finalized_block_number(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.last_finalized_block_number) + } + + async fn confirmed_block_number(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.last_finalized_block_number) + } + + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>> { + let from_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .min() + .unwrap_or(&0); + let to_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .max() + .unwrap_or(&0); + + let logs = self + .get_events( + U64::from(from_block).into(), + U64::from(to_block).into(), + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + Some(packed_version), + RETRY_LIMIT, + ) + .await?; + + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + + async fn get_total_priority_txs(&self) -> Result { + Ok(self + .inner + .read() + .await + .processed_priority_transactions_count) + } + + async fn 
chain_id(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.chain_id) + } + + async fn get_chain_root( + &self, + _block_number: U64, + _l2_chain_id: L2ChainId, + ) -> Result { + unimplemented!() + } +} + +#[async_trait::async_trait] +impl L2EthClient for MockEthClient { + async fn get_chain_log_proof( + &self, + l1_batch_number: L1BatchNumber, + _chain_id: L2ChainId, + ) -> EnrichedClientResult> { + Ok(self + .inner + .read() + .await + .chain_log_proofs + .get(&l1_batch_number) + .cloned()) + } + + async fn get_chain_root_l2( + &self, + l1_batch_number: L1BatchNumber, + _l2_chain_id: L2ChainId, + ) -> Result, ContractCallError> { + Ok(self + .inner + .read() + .await + .chain_roots + .get(&l1_batch_number.0.into()) + .cloned()) + } +} + +fn tx_into_log(tx: L1Tx) -> Log { + let tx = abi::Transaction::try_from(Transaction::from(tx)).unwrap(); + let abi::Transaction::L1 { + tx, + factory_deps, + eth_block, + .. + } = tx + else { + unreachable!() + }; + + let data = ethabi::encode( + &abi::NewPriorityRequest { + tx_id: tx.nonce, + tx_hash: tx.hash().into(), + expiration_timestamp: u64::MAX, + transaction: tx, + factory_deps, + } + .encode(), + ); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![hyperchain_contract() + .event("NewPriorityRequest") + .expect("NewPriorityRequest event is missing in abi") + .signature()], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::default()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} + +fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { + let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); + + let encoded_params = ethabi::encode(&[upgrade_token]); + + let execute_upgrade_selector = hyperchain_contract() + .function("executeUpgrade") + 
.unwrap() + .short_signature(); + + // Concatenate the function selector with the encoded parameters + let mut calldata = Vec::with_capacity(4 + encoded_params.len()); + calldata.extend_from_slice(&execute_upgrade_selector); + calldata.extend_from_slice(&encoded_params); + + calldata +} + +fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + // struct DiamondCutData { + // FacetCut[] facetCuts; + // address initAddress; + // bytes initCalldata; + // } + let final_data = ethabi::encode(&[Token::Tuple(vec![ + Token::Array(vec![]), + Token::Address(Address::zero()), + Token::Bytes(init_calldata(upgrade.clone())), + ])]); + tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} +fn upgrade_timestamp_log(eth_block: u64) -> Log { + let final_data = ethabi::encode(&[U256::from(12345).into_token()]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + chain_admin_contract() + .event("UpdateUpgradeTimestamp") + .expect("UpdateUpgradeTimestamp event is missing in ABI") + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + l1_batch_number: None, + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} 
+ +fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { + let abi::Transaction::L1 { + tx, factory_deps, .. + } = upgrade + .tx + .map(|tx| Transaction::from(tx).try_into().unwrap()) + .unwrap_or(abi::Transaction::L1 { + tx: Default::default(), + factory_deps: vec![], + eth_block: 0, + }) + else { + unreachable!() + }; + ProposedUpgrade { + l2_protocol_upgrade_tx: tx, + factory_deps, + bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), + default_account_hash: upgrade.default_account_code_hash.unwrap_or_default().into(), + verifier: upgrade.verifier_address.unwrap_or_default(), + verifier_params: upgrade.verifier_params.unwrap_or_default().into(), + l1_contracts_upgrade_calldata: vec![], + post_upgrade_calldata: vec![], + upgrade_timestamp: upgrade.timestamp.into(), + new_protocol_version: upgrade.version.pack(), + } + .encode() +} + +fn batch_root_to_log(sl_block_number: u64, l2_batch_number: u64, batch_root: H256) -> Log { + let topic1 = ethabi::long_signature( + "AppendedChainBatchRoot", + &[ + ethabi::ParamType::Uint(256), + ethabi::ParamType::Uint(256), + ethabi::ParamType::FixedBytes(32), + ], + ); + let topic2 = u256_to_h256(L2ChainId::default().0.into()); + let topic3 = u256_to_h256(l2_batch_number.into()); + let data = ethabi::encode(&[batch_root.into_token()]); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![topic1, topic2, topic3], + data: data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(sl_block_number.into()), + l1_batch_number: Some(sl_block_number.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} diff --git a/core/node/eth_watch/src/tests/mod.rs b/core/node/eth_watch/src/tests/mod.rs new file mode 100644 index 00000000000..786c8577a2e --- /dev/null +++ b/core/node/eth_watch/src/tests/mod.rs 
@@ -0,0 +1,824 @@ +use std::convert::TryInto; + +use zksync_contracts::chain_admin_contract; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_types::{ + abi, + aggregated_operations::AggregatedActionType, + api::ChainAggProof, + block::L1BatchHeader, + commitment::L1BatchCommitmentArtifacts, + l1::{L1Tx, OpProcessingType, PriorityQueueType}, + l2_to_l1_log::BatchAndChainMerklePath, + protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, + protocol_version::ProtocolSemanticVersion, + Address, Execute, L1BatchNumber, L1TxCommonData, L2ChainId, PriorityOpId, ProtocolUpgrade, + ProtocolVersion, ProtocolVersionId, SLChainId, Transaction, H256, U256, +}; + +use crate::{tests::client::MockEthClient, EthWatch, L2EthClient}; + +mod client; + +const SL_CHAIN_ID: SLChainId = SLChainId(505); + +fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { + let tx = L1Tx { + execute: Execute { + contract_address: Some(Address::repeat_byte(0x11)), + calldata: vec![1, 2, 3], + factory_deps: vec![], + value: U256::zero(), + }, + common_data: L1TxCommonData { + serial_id: PriorityOpId(serial_id), + sender: [1u8; 20].into(), + eth_block, + gas_limit: Default::default(), + max_fee_per_gas: Default::default(), + gas_per_pubdata_limit: 1u32.into(), + full_fee: Default::default(), + layer_2_tip_fee: U256::from(10u8), + refund_recipient: Address::zero(), + to_mint: Default::default(), + priority_queue_type: PriorityQueueType::Deque, + op_processing_type: OpProcessingType::Common, + canonical_tx_hash: H256::default(), + }, + received_timestamp_ms: 0, + }; + // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
+ let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); + tx.try_into().unwrap() +} + +fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { + let tx = ProtocolUpgradeTx { + execute: Execute { + contract_address: Some(Address::repeat_byte(0x11)), + calldata: vec![1, 2, 3], + factory_deps: vec![], + value: U256::zero(), + }, + common_data: ProtocolUpgradeTxCommonData { + upgrade_id: id, + sender: [1u8; 20].into(), + eth_block, + gas_limit: Default::default(), + max_fee_per_gas: Default::default(), + gas_per_pubdata_limit: 1u32.into(), + refund_recipient: Address::zero(), + to_mint: Default::default(), + canonical_tx_hash: H256::zero(), + }, + received_timestamp_ms: 0, + }; + // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() +} + +async fn create_test_watcher( + connection_pool: ConnectionPool, + is_gateway: bool, +) -> (EthWatch, MockEthClient, MockEthClient) { + let l1_client = MockEthClient::new(SLChainId(42)); + let sl_client = MockEthClient::new(SL_CHAIN_ID); + let sl_l2_client: Option> = if is_gateway { + Some(Box::new(sl_client.clone())) + } else { + None + }; + let watcher = EthWatch::new( + &chain_admin_contract(), + Box::new(l1_client.clone()), + sl_l2_client, + connection_pool, + std::time::Duration::from_nanos(1), + L2ChainId::default(), + ) + .await + .unwrap(); + + (watcher, l1_client, sl_client) +} + +async fn create_l1_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient) { + let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; + (watcher, l1_client) +} + +async fn create_gateway_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient, MockEthClient) { + create_test_watcher(connection_pool, true).await +} + 
+#[test_log::test(tokio::test)] +async fn test_normal_operation_l1_txs() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[build_l1_tx(0, 10), build_l1_tx(1, 14), build_l1_tx(2, 18)]) + .await; + client.set_last_finalized_block_number(15).await; + // second tx will not be processed, as its block is not finalized yet. + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + assert_eq!(db_txs.len(), 2); + let db_tx = db_txs[0].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 0); + let db_tx = db_txs[1].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 1); + + client.set_last_finalized_block_number(20).await; + // now the second tx will be processed + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_txs = get_all_db_txs(&mut storage).await; + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + assert_eq!(db_txs.len(), 3); + let db_tx = db_txs[2].clone(); + assert_eq!(db_tx.common_data.serial_id.0, 2); +} + +#[test_log::test(tokio::test)] +async fn test_gap_in_upgrade_timestamp() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_upgrade_timestamp(&[( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }, + tx: None, + ..Default::default() + }, +
10, + )]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_versions = storage.protocol_versions_dal().all_versions().await; + // there should be genesis version and just added version + assert_eq!(db_versions.len(), 2); + + let previous_version = (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(); + let next_version = ProtocolVersionId::next(); + assert_eq!(db_versions[0].minor, previous_version); + assert_eq!(db_versions[1].minor, next_version); +} + +#[test_log::test(tokio::test)] +async fn test_normal_operation_upgrade_timestamp() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + + let mut client = MockEthClient::new(SLChainId(42)); + let mut watcher = EthWatch::new( + &chain_admin_contract(), + Box::new(client.clone()), + None, + connection_pool.clone(), + std::time::Duration::from_nanos(1), + L2ChainId::default(), + ) + .await + .unwrap(); + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_upgrade_timestamp(&[ + ( + ProtocolUpgrade { + tx: None, + ..Default::default() + }, + 10, + ), + ( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }, + tx: Some(build_upgrade_tx(ProtocolVersionId::next(), 18)), + ..Default::default() + }, + 18, + ), + ( + ProtocolUpgrade { + version: ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 1.into(), + }, + tx: None, + ..Default::default() + }, + 19, + ), + ]) + .await; + client.set_last_finalized_block_number(15).await; + // The second upgrade will not be processed, as it has less than 5 confirmations. + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_versions = storage.protocol_versions_dal().all_versions().await; + // There should be genesis version and just added version. 
+ assert_eq!(db_versions.len(), 2); + assert_eq!(db_versions[1].minor, ProtocolVersionId::latest()); + + client.set_last_finalized_block_number(20).await; + // Now the second and the third upgrades will be processed. + watcher.loop_iteration(&mut storage).await.unwrap(); + let db_versions = storage.protocol_versions_dal().all_versions().await; + let mut expected_version = ProtocolSemanticVersion { + minor: ProtocolVersionId::next(), + patch: 0.into(), + }; + assert_eq!(db_versions.len(), 4); + assert_eq!(db_versions[2], expected_version); + expected_version.patch += 1; + assert_eq!(db_versions[3], expected_version); + + // Check that tx was saved with the second upgrade. + let tx = storage + .protocol_versions_dal() + .get_protocol_upgrade_tx(ProtocolVersionId::next()) + .await + .unwrap() + .expect("no protocol upgrade transaction"); + assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); +} + +#[test_log::test(tokio::test)] +#[should_panic] +async fn test_gap_in_single_batch() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 14), + build_l1_tx(5, 14), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); +} + +#[test_log::test(tokio::test)] +#[should_panic] +async fn test_gap_between_batches() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + // this goes to the first batch + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 
14), + // this goes to the second batch + build_l1_tx(4, 20), + build_l1_tx(5, 22), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 3); + client.set_last_finalized_block_number(25).await; + watcher.loop_iteration(&mut storage).await.unwrap(); +} + +#[test_log::test(tokio::test)] +async fn test_overlapping_batches() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + client + .add_transactions(&[ + // this goes to the first batch + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + // this goes to the second batch + build_l1_tx(1, 20), + build_l1_tx(2, 22), + build_l1_tx(3, 23), + build_l1_tx(4, 23), + ]) + .await; + client.set_last_finalized_block_number(15).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 3); + + client.set_last_finalized_block_number(25).await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 5); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[4].clone(); + assert_eq!(tx.common_data.serial_id.0, 4); +} + +#[test_log::test(tokio::test)] +async fn test_transactions_get_gradually_processed_by_gateway() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, 
mut l1_client, mut gateway_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + l1_client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 20), + build_l1_tx(4, 22), + ]) + .await; + l1_client.set_last_finalized_block_number(15).await; + gateway_client + .set_processed_priority_transactions_count(2) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 2); + + l1_client.set_last_finalized_block_number(25).await; + gateway_client + .set_processed_priority_transactions_count(4) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 4); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[3].clone(); + assert_eq!(tx.common_data.serial_id.0, 3); +} + +#[test_log::test(tokio::test)] +async fn test_batch_root_processor_from_genesis() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + setup_batch_roots(&connection_pool, 0).await; + let (mut watcher, _, mut sl_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let batch_roots = batch_roots(); + sl_client + .add_batch_roots(&[ + (5, 1, batch_roots[0]), + (9, 2, batch_roots[1]), + (11, 3, batch_roots[2]), + ]) + .await; + sl_client + .add_chain_roots(&[ + ( + 5, + H256::from_slice( + &hex::decode( + "10a2ef76e709d318b459be49f1e8d7f02d7120f2b501bc0afddd935f1a813c67", + ) + .unwrap(), + ), + ), + ( + 9, + H256::from_slice( + &hex::decode( + "e0c3330f674b6b2d578f958a1dbd66f164d068b0bb5a9fb077eca013976fda6f", + ) + .unwrap(), + ), + ), + ( + 11, + 
H256::from_slice( + &hex::decode( + "d22fc9a7b005fefecd33bb56cdbf70bcc23610e693cd21295f9920227c2cb1cc", + ) + .unwrap(), + ), + ), + ]) + .await; + let chain_log_proofs = chain_log_proofs(); + sl_client.add_chain_log_proofs(chain_log_proofs).await; + + sl_client.set_last_finalized_block_number(5).await; + + let mut connection = connection_pool.connection().await.unwrap(); + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof1 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(1)) + .await + .unwrap() + .unwrap(); + let proof1 = hex::encode(&bincode::serialize(&proof1).unwrap()); + assert_eq!(proof1, "000000000600000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303030303030303030303030303030303030303030303030303030303030303530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + sl_client.set_last_finalized_block_number(11).await; + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof2 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(2)) + .await + .unwrap() + .unwrap(); + let proof2 = hex::encode(&bincode::serialize(&proof2).unwrap()); + assert_eq!(proof2, 
"0100000007000000000000004200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303031420000000000000030783130613265663736653730396433313862343539626534396631653864376630326437313230663262353031626330616664646439333566316138313363363742000000000000003078303030303030303030303030303030303030303030303030303030303030303930303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307861333738613230636132376237616533303731643162643763326164613030343639616263353765343239646436663438613833303932646237303539613138"); + + let proof3 = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(3)) + .await + .unwrap() + .unwrap(); + let proof3 = hex::encode(&bincode::serialize(&proof3).unwrap()); + assert_eq!(proof3, 
"02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030324200000000000000307834363730306234643430616335633335616632633232646461323738376139316562353637623036633932346138666238616539613035623230633038633231420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306230303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); +} + +#[test_log::test(tokio::test)] +async fn test_batch_root_processor_restart() { + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + setup_batch_roots(&connection_pool, 2).await; + let (mut watcher, _, mut sl_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let batch_roots = batch_roots(); + sl_client + .add_batch_roots(&[ + (11, 3, batch_roots[2]), + (13, 4, batch_roots[3]), + (14, 5, batch_roots[4]), + (14, 6, batch_roots[5]), + ]) + .await; + sl_client + .add_chain_roots(&[ + ( + 11, + H256::from_slice( + &hex::decode( + "d22fc9a7b005fefecd33bb56cdbf70bcc23610e693cd21295f9920227c2cb1cc", + ) + .unwrap(), + ), + ), + ( + 13, + H256::from_slice( + &hex::decode( + 
"53edc1f5ad79c5999bd578dfc135f9c51ebd7fafa4585b64f71d15b2dce1b728", + ) + .unwrap(), + ), + ), + ( + 14, + H256::from_slice( + &hex::decode( + "61b35796307159a6da8aa45448e6941e3438380582e2f3cb358db59598ae156f", + ) + .unwrap(), + ), + ), + ]) + .await; + let chain_log_proofs = chain_log_proofs(); + sl_client.add_chain_log_proofs(chain_log_proofs).await; + + sl_client.set_last_finalized_block_number(14).await; + + let mut connection = connection_pool.connection().await.unwrap(); + watcher.loop_iteration(&mut connection).await.unwrap(); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(3)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, "02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030324200000000000000307834363730306234643430616335633335616632633232646461323738376139316562353637623036633932346138666238616539613035623230633038633231420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306230303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + let proof = connection + 
.blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(4)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, "02000000080000000000000042000000000000003078303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030334200000000000000307837623765373735373139343639366666393634616233353837393131373362636337663735356132656161393334653935373061636533393139383435313265420000000000000030786530633333333066363734623662326435373866393538613164626436366631363464303638623062623561396662303737656361303133393736666461366642000000000000003078303030303030303030303030303030303030303030303030303030303030306430303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307835353063313735316338653764626166633839303939326634353532333636663064643565623665343362653535353936386264616338633732656466316261"); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(5)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, 
"030000000900000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303442000000000000003078303235663065363031353230366661626364326263613930316432633438396536336263356564346231356266356330633963363066396531363735383564614200000000000000307863633463343165646230633230333133343862323932623736386539626163316565386339326330396566386133323737633265636534303963313264383661420000000000000030783533656463316635616437396335393939626435373864666331333566396335316562643766616661343538356236346637316431356232646365316237323842000000000000003078303030303030303030303030303030303030303030303030303030303030306530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); + + let proof = connection + .blocks_dal() + .get_l1_batch_chain_merkle_path(L1BatchNumber(6)) + .await + .unwrap() + .unwrap(); + let proof = hex::encode(&bincode::serialize(&proof).unwrap()); + assert_eq!(proof, 
"030000000900000000000000420000000000000030783030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303542000000000000003078323465653435363834376535373364313635613832333634306632303834383139636331613865333433316562633635633865363064333435343266313637324200000000000000307863633463343165646230633230333133343862323932623736386539626163316565386339326330396566386133323737633265636534303963313264383661420000000000000030783533656463316635616437396335393939626435373864666331333566396335316562643766616661343538356236346637316431356232646365316237323842000000000000003078303030303030303030303030303030303030303030303030303030303030306530303030303030303030303030303030303030303030303030303030303030334200000000000000307830303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030316639420000000000000030783031303230303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303042000000000000003078303932343932386331333737613663663234633339633264343666386562396466323365383131623236646333353237653534383339366664346531373362314200000000000000307833373561356266393039636230323134336533363935636136353865303634316537333961613539306630303034646261393335373263343463646239643264"); +} + +async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { + storage.transactions_dal().reset_mempool().await.unwrap(); + storage + .transactions_dal() + .sync_mempool(&[], &[], 0, 0, 1000) + .await + .unwrap() +} + +async fn setup_db(connection_pool: &ConnectionPool) { + connection_pool + .connection() + .await + .unwrap() + .protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion { + version: ProtocolSemanticVersion { + minor: (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), + patch: 0.into(), + }, + ..Default::default() + }) + .await + .unwrap(); +} + +fn batch_roots() -> Vec { + [ 
+ "5EEBBC173358620F7F61B69D80AFE503F76190396918EB7B27CEF4DB7C51D60A", + "B7E66115CDAAF5FFE70B53EF0AC6D0FF7D7BEB4341FEC6352A670B805AE15935", + "09BD2AD9C01C05F760BBEC6E59BF728566551B48C0DCBD01DB797D1C703122F8", + "B6E530FF878093B2D0CAF87780451A8F07922570E2D820B7A8541114E0D70FB5", + "B4F195844BA1792F3C1FB57C826B2DA60EA6EEBB90BF53F706120E49BB0486EF", + "118F6FAC96824D4E0845F7C7DF716969378F3F2038D9E9D0FEAD1FE01BA11A93", + ] + .into_iter() + .map(|s| H256::from_slice(&hex::decode(s).unwrap())) + .collect() +} + +fn chain_log_proofs() -> Vec<(L1BatchNumber, ChainAggProof)> { + vec![ + ( + L1BatchNumber(5), + ChainAggProof { + chain_id_leaf_proof: vec![ + H256::from_slice( + &hex::decode( + "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + ) + .unwrap(), + ), + H256::from_slice( + &hex::decode( + "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d", + ) + .unwrap(), + ), + ], + chain_id_leaf_proof_mask: 3u32.into(), + }, + ), + ( + L1BatchNumber(9), + ChainAggProof { + chain_id_leaf_proof: vec![ + H256::from_slice( + &hex::decode( + "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + ) + .unwrap(), + ), + H256::from_slice( + &hex::decode( + "a378a20ca27b7ae3071d1bd7c2ada00469abc57e429dd6f48a83092db7059a18", + ) + .unwrap(), + ), + ], + chain_id_leaf_proof_mask: 3u32.into(), + }, + ), + ( + L1BatchNumber(11), + ChainAggProof { + chain_id_leaf_proof: vec![ + H256::from_slice( + &hex::decode( + "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + ) + .unwrap(), + ), + H256::from_slice( + &hex::decode( + "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d", + ) + .unwrap(), + ), + ], + chain_id_leaf_proof_mask: 3u32.into(), + }, + ), + ( + L1BatchNumber(13), + ChainAggProof { + chain_id_leaf_proof: vec![ + H256::from_slice( + &hex::decode( + "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + ) + .unwrap(), + ), + H256::from_slice( + &hex::decode( + 
"550c1751c8e7dbafc890992f4552366f0dd5eb6e43be555968bdac8c72edf1ba", + ) + .unwrap(), + ), + ], + chain_id_leaf_proof_mask: 3u32.into(), + }, + ), + ( + L1BatchNumber(14), + ChainAggProof { + chain_id_leaf_proof: vec![ + H256::from_slice( + &hex::decode( + "0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + ) + .unwrap(), + ), + H256::from_slice( + &hex::decode( + "375a5bf909cb02143e3695ca658e0641e739aa590f0004dba93572c44cdb9d2d", + ) + .unwrap(), + ), + ], + chain_id_leaf_proof_mask: 3u32.into(), + }, + ), + ] +} + +async fn setup_batch_roots( + connection_pool: &ConnectionPool, + number_of_processed_batches: usize, +) { + let batch_roots = batch_roots(); + + let mut connection = connection_pool.connection().await.unwrap(); + + assert!(number_of_processed_batches <= batch_roots.len()); + for (i, root) in batch_roots.into_iter().enumerate() { + let batch_number = L1BatchNumber(i as u32 + 1); + let header = L1BatchHeader::new( + batch_number, + i as u64, + Default::default(), + (ProtocolVersionId::latest() as u16 - 1).try_into().unwrap(), + ); + connection + .blocks_dal() + .insert_mock_l1_batch(&header) + .await + .unwrap(); + connection + .blocks_dal() + .save_l1_batch_commitment_artifacts( + batch_number, + &L1BatchCommitmentArtifacts { + l2_l1_merkle_root: root, + ..Default::default() + }, + ) + .await + .unwrap(); + + let eth_tx_id = connection + .eth_sender_dal() + .save_eth_tx( + i as u64, + Default::default(), + AggregatedActionType::Execute, + Default::default(), + Default::default(), + Default::default(), + Default::default(), + true, + ) + .await + .unwrap() + .id; + connection + .eth_sender_dal() + .set_chain_id(eth_tx_id, SL_CHAIN_ID.0) + .await + .unwrap(); + connection + .blocks_dal() + .set_eth_tx_id( + batch_number..=batch_number, + eth_tx_id, + AggregatedActionType::Execute, + ) + .await + .unwrap(); + + if i < number_of_processed_batches { + connection + .blocks_dal() + .set_batch_chain_merkle_path( + batch_number, + 
BatchAndChainMerklePath { + batch_proof_len: 0, + proof: Vec::new(), + }, + ) + .await + .unwrap() + } + } +} diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index f8789596f9d..97c054b7fe6 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,11 +1,11 @@ use zksync_config::{configs::gateway::GatewayChainConfig, ContractsConfig, EthWatchConfig}; use zksync_contracts::chain_admin_contract; -use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; -use zksync_types::settlement::SettlementMode; +use zksync_eth_watch::{EthHttpQueryClient, EthWatch, L2EthClient}; +use zksync_types::{settlement::SettlementMode, L2ChainId}; use crate::{ implementations::resources::{ - eth_interface::{EthInterfaceResource, GatewayEthInterfaceResource}, + eth_interface::{EthInterfaceResource, L2InterfaceResource}, pools::{MasterPool, PoolResource}, }, service::StopReceiver, @@ -24,6 +24,7 @@ pub struct EthWatchLayer { contracts_config: ContractsConfig, gateway_contracts_config: Option, settlement_mode: SettlementMode, + chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -31,7 +32,7 @@ pub struct EthWatchLayer { pub struct Input { pub master_pool: PoolResource, pub eth_client: EthInterfaceResource, - pub gateway_client: Option, + pub gateway_client: Option, } #[derive(Debug, IntoContext)] @@ -47,12 +48,14 @@ impl EthWatchLayer { contracts_config: ContractsConfig, gateway_contracts_config: Option, settlement_mode: SettlementMode, + chain_id: L2ChainId, ) -> Self { Self { eth_watch_config, contracts_config, gateway_contracts_config, settlement_mode, + chain_id, } } } @@ -97,27 +100,28 @@ impl WiringLayer for EthWatchLayer { self.eth_watch_config.confirmations_for_eth_event, ); - let sl_client = if self.settlement_mode.is_gateway() { + let sl_l2_client: Option> = if self.settlement_mode.is_gateway() 
{ let gateway_client = input.gateway_client.unwrap().0; let contracts_config = self.gateway_contracts_config.unwrap(); - EthHttpQueryClient::new( + Some(Box::new(EthHttpQueryClient::new( gateway_client, contracts_config.diamond_proxy_addr, Some(contracts_config.state_transition_proxy_addr), contracts_config.chain_admin_addr, contracts_config.governance_addr, self.eth_watch_config.confirmations_for_eth_event, - ) + ))) } else { - l1_client.clone() + None }; let eth_watch = EthWatch::new( &chain_admin_contract(), Box::new(l1_client), - Box::new(sl_client), + sl_l2_client, main_pool, self.eth_watch_config.poll_interval(), + self.chain_id, ) .await?; diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs index 390d321647c..6a006a663c3 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs @@ -205,6 +205,7 @@ impl WiringLayer for Web3ServerLayer { let bridge_addresses_updater_task = input .main_node_client + .clone() .map(|main_node_client| BridgeAddressesUpdaterTask { bridge_address_updater: bridge_addresses_handle.clone(), main_node_client: main_node_client.0, @@ -233,6 +234,9 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } + if let Some(main_node_client) = input.main_node_client { + api_builder = api_builder.with_l2_l1_log_proof_handler(main_node_client.0) + } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); diff --git a/core/tests/ts-integration/src/helpers.ts b/core/tests/ts-integration/src/helpers.ts index 354dfe64fdf..299e3085bf8 100644 --- a/core/tests/ts-integration/src/helpers.ts +++ b/core/tests/ts-integration/src/helpers.ts @@ -133,6 +133,14 @@ export async function 
waitForBlockToBeFinalizedOnL1(wallet: zksync.Wallet, block } } +export async function waitForL2ToL1LogProof(wallet: zksync.Wallet, blockNumber: number, txHash: string) { + await waitForBlockToBeFinalizedOnL1(wallet, blockNumber); + + while ((await wallet.provider.getLogProof(txHash)) == null) { + await zksync.utils.sleep(wallet.provider.pollingInterval); + } +} + /** * Returns an increased gas price to decrease chances of L1 transactions being stuck * diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 5abae0b89d3..65b3f975100 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -7,7 +7,7 @@ import { Token } from '../src/types'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; +import { scaledGasPrice, waitForL2ToL1LogProof } from '../src/helpers'; const SECONDS = 2000; jest.setTimeout(100 * SECONDS); @@ -169,7 +169,7 @@ describe('base ERC20 contract checks', () => { await expect(withdrawalPromise).toBeAccepted([]); const withdrawalTx = await withdrawalPromise; const l2Receipt = await withdrawalTx.wait(); - await waitForBlockToBeFinalizedOnL1(alice, l2Receipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2Receipt!.blockNumber, withdrawalTx.hash); await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted([]); const receipt = await alice._providerL2().getTransactionReceipt(withdrawalTx.hash); diff --git a/core/tests/ts-integration/tests/erc20.test.ts b/core/tests/ts-integration/tests/erc20.test.ts index 382c625ac70..a0345fb71ab 100644 --- a/core/tests/ts-integration/tests/erc20.test.ts +++ b/core/tests/ts-integration/tests/erc20.test.ts @@ -8,7 +8,7 @@ import { shouldChangeTokenBalances, shouldOnlyTakeFee } from '../src/modifiers/b import * as zksync from 'zksync-ethers'; import * as ethers from 
'ethers'; -import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; +import { scaledGasPrice, waitForL2ToL1LogProof } from '../src/helpers'; import { L2_DEFAULT_ETH_PER_ACCOUNT } from '../src/context-owner'; describe('L1 ERC20 contract checks', () => { @@ -175,7 +175,7 @@ describe('L1 ERC20 contract checks', () => { await expect(withdrawalPromise).toBeAccepted([l2BalanceChange, feeCheck]); const withdrawalTx = await withdrawalPromise; const l2TxReceipt = await alice.provider.getTransactionReceipt(withdrawalTx.hash); - await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2TxReceipt!.blockNumber, withdrawalTx.hash); // Note: For L1 we should use L1 token address. const l1BalanceChange = await shouldChangeTokenBalances( @@ -216,7 +216,7 @@ describe('L1 ERC20 contract checks', () => { // It throws once it gets status == 0 in the receipt and doesn't wait for the finalization. const l2Hash = zksync.utils.getL2HashFromPriorityOp(l1Receipt, await alice.provider.getMainContractAddress()); const l2TxReceipt = await alice.provider.getTransactionReceipt(l2Hash); - await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2TxReceipt!.blockNumber, l2Hash); // Claim failed deposit. 
await expect(alice.claimFailedDeposit(l2Hash)).toBeAccepted(); await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.toEqual(initialBalance); diff --git a/core/tests/ts-integration/tests/ether.test.ts b/core/tests/ts-integration/tests/ether.test.ts index abc9237025a..f6dc9f36ae0 100644 --- a/core/tests/ts-integration/tests/ether.test.ts +++ b/core/tests/ts-integration/tests/ether.test.ts @@ -11,7 +11,7 @@ import { import { checkReceipt } from '../src/modifiers/receipt-check'; import * as zksync from 'zksync-ethers'; -import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; +import { scaledGasPrice, waitForL2ToL1LogProof } from '../src/helpers'; import { ethers } from 'ethers'; describe('ETH token checks', () => { @@ -255,7 +255,7 @@ describe('ETH token checks', () => { await expect(withdrawalPromise).toBeAccepted([l2ethBalanceChange]); const withdrawalTx = await withdrawalPromise; const l2TxReceipt = await alice.provider.getTransactionReceipt(withdrawalTx.hash); - await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2TxReceipt!.blockNumber, withdrawalTx.hash); // TODO (SMA-1374): Enable L1 ETH checks as soon as they're supported. 
await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted(); diff --git a/core/tests/ts-integration/tests/l2-erc20.test.ts b/core/tests/ts-integration/tests/l2-erc20.test.ts index cc07ebb9c47..f1c89b1c05f 100644 --- a/core/tests/ts-integration/tests/l2-erc20.test.ts +++ b/core/tests/ts-integration/tests/l2-erc20.test.ts @@ -9,7 +9,7 @@ import { shouldChangeTokenBalances, shouldOnlyTakeFee } from '../src/modifiers/b import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Provider, Wallet } from 'ethers'; -import { scaledGasPrice, deployContract, readContract, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; +import { scaledGasPrice, deployContract, readContract, waitForL2ToL1LogProof } from '../src/helpers'; describe('L2 native ERC20 contract checks', () => { let testMaster: TestMaster; @@ -101,7 +101,7 @@ describe('L2 native ERC20 contract checks', () => { const withdrawalTx = await withdrawalPromise; const l2TxReceipt = await alice.provider.getTransactionReceipt(withdrawalTx.hash); await withdrawalTx.waitFinalize(); - await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2TxReceipt!.blockNumber, withdrawalTx.hash); await alice.finalizeWithdrawalParams(withdrawalTx.hash); // kl todo finalize the Withdrawals with the params here. Alternatively do in the SDK. await expect(alice.finalizeWithdrawal(withdrawalTx.hash)).toBeAccepted(); @@ -171,7 +171,8 @@ describe('L2 native ERC20 contract checks', () => { // It throws once it gets status == 0 in the receipt and doesn't wait for the finalization. const l2Hash = zksync.utils.getL2HashFromPriorityOp(l1Receipt, await alice.provider.getMainContractAddress()); const l2TxReceipt = await alice.provider.getTransactionReceipt(l2Hash); - await waitForBlockToBeFinalizedOnL1(alice, l2TxReceipt!.blockNumber); + await waitForL2ToL1LogProof(alice, l2TxReceipt!.blockNumber, l2Hash); + // Claim failed deposit. 
await expect(alice.claimFailedDeposit(l2Hash)).toBeAccepted(); await expect(alice.getBalanceL1(tokenDetails.l1Address)).resolves.toEqual(initialBalance); diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 5abee904765..f2a9968a90a 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -41,7 +41,7 @@ api: estimate_gas_scale_factor: 1.3 estimate_gas_acceptable_overestimation: 5000 max_tx_size: 1000000 - api_namespaces: [ en,eth,net,web3,zks,pubsub,debug ] + api_namespaces: [ en,eth,net,web3,zks,pubsub,debug,unstable ] state_keeper: transaction_slots: 8192 max_allowed_l2_tx_gas_limit: 15000000000 diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh index 3ea3980e68f..c5c8987d85b 100644 --- a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -15,7 +15,7 @@ _zkstack() { local context curcontext="$curcontext" state line _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -37,7 +37,7 @@ _arguments "${_arguments_options[@]}" : \ '--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish fish powershell zsh)' \ '-o+[The out directory to write the autocomplete script to]:OUT:_files' \ '--out=[The out directory to write the autocomplete script to]:OUT:_files' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -47,7 +47,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (ecosystem) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ 
'--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -65,11 +65,11 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--ecosystem-name=[]:ECOSYSTEM_NAME: ' \ +'--ecosystem-name=[]:ECOSYSTEM_NAME:_default' \ '--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ '--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ -'--chain-name=[]:CHAIN_NAME: ' \ -'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ '--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ '--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" random\:"Generate random wallets" @@ -77,13 +77,13 @@ empty\:"Generate placeholder wallets" in-file\:"Specify file with wallets"))' \ '--wallet-path=[Wallet path]:WALLET_PATH:_files' \ '--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ -'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ -'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ -'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ '--set-as-default=[Set as default chain]' \ '--evm-emulator=[Enable EVM emulator]' \ '--start-containers=[Start reth and postgres containers after creation]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--legacy-bridge[]' \ '--skip-submodules-checkout[Skip submodules checkout]' \ '--skip-contract-compilation-override[Skip contract compilation override]' \ @@ -96,17 
+96,17 @@ in-file\:"Specify file with wallets"))' \ ;; (build-transactions) _arguments "${_arguments_options[@]}" : \ -'--sender=[Address of the transaction sender]:SENDER: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--sender=[Address of the transaction sender]:SENDER:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ '-o+[Output directory for the generated files]:OUT:_files' \ '--out=[Output directory for the generated files]:OUT:_files' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -121,19 +121,19 @@ _arguments "${_arguments_options[@]}" : \ '--deploy-erc20=[Deploy ERC20 contracts]' \ '--deploy-ecosystem=[Deploy ecosystem contracts]' \ '--ecosystem-contracts-path=[Path to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ 
-'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ '--deploy-paymaster=[Deploy Paymaster contract]' \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ '-o+[Enable Grafana]' \ '--observability=[Enable Grafana]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-d[]' \ @@ -152,18 +152,18 @@ _arguments "${_arguments_options[@]}" : \ ;; (change-default-chain) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ '-h[Print help]' \ '--help[Print help]' \ -'::name:' \ +'::name:_default' \ && ret=0 ;; (setup-observability) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -217,7 +217,7 @@ esac ;; (chain) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ 
'--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -235,8 +235,8 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--chain-name=[]:CHAIN_NAME: ' \ -'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--chain-name=[]:CHAIN_NAME:_default' \ +'--chain-id=[Chain ID]:CHAIN_ID:_default' \ '--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ '--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" random\:"Generate random wallets" @@ -244,12 +244,12 @@ empty\:"Generate placeholder wallets" in-file\:"Specify file with wallets"))' \ '--wallet-path=[Wallet path]:WALLET_PATH:_files' \ '--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ -'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ -'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ -'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS:_default' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR:_default' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR:_default' \ '--set-as-default=[Set as default chain]' \ '--evm-emulator=[Enable EVM emulator]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--legacy-bridge[]' \ '--skip-submodules-checkout[Skip submodules checkout]' \ '--skip-contract-compilation-override[Skip contract compilation override]' \ @@ -266,12 +266,12 @@ _arguments "${_arguments_options[@]}" : \ '--out=[Output directory for the generated files]:OUT:_files' \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier 
URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -285,15 +285,15 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ 
+'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ '--deploy-paymaster=[]' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-d[]' \ @@ -318,10 +318,10 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (configs) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -364,9 +364,9 @@ esac ;; (genesis) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -388,9 +388,9 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (init-database) _arguments "${_arguments_options[@]}" : \ -'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ -'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ 
+'--server-db-url=[Server database url without database name]:SERVER_DB_URL:_default' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-d[Use default database urls and names]' \ '--dev[Use default database urls and names]' \ '-d[]' \ @@ -404,7 +404,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (server) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -448,11 +448,11 @@ esac _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -466,11 +466,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ 
-'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -484,11 +484,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -502,11 +502,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ 
-'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -520,11 +520,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -538,11 +538,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments 
"${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -556,11 +556,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to 
use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -574,11 +574,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -592,11 +592,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' 
\ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -610,11 +610,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -628,12 +628,12 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom 
provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -647,12 +647,12 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--verify=[Verify deployed contracts]' \ '--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ -'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ -'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ -'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL:_default' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY:_default' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--resume[]' \ '--zksync[]' \ '-v[Verbose mode]' \ @@ -788,7 +788,7 @@ esac ;; (dev) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -806,7 +806,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (database) 
_arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -826,11 +826,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -842,11 +842,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -858,11 +858,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -873,8 +873,8 @@ _arguments "${_arguments_options[@]}" : \ (new-migration) _arguments "${_arguments_options[@]}" : \ '--database=[Database to create new migration for]:DATABASE:(prover core)' \ -'--name=[Migration name]:NAME: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--name=[Migration name]:NAME:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -886,11 +886,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. 
If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -902,11 +902,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -918,11 +918,11 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '-p+[Prover database]' \ '--prover=[Prover database]' \ -'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \ +'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL:_default' \ '-c+[Core database]' \ '--core=[Core database]' \ -'--core-url=[URL of the Core database. 
If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -984,7 +984,7 @@ esac ;; (test) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1002,9 +1002,9 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (integration) _arguments "${_arguments_options[@]}" : \ -'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ -'--test-pattern=[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--test-pattern=[Run just the tests matching a pattern. 
Same as the -t flag on jest.]:TEST_PATTERN:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-e[Run tests for external node]' \ '--external-node[Run tests for external node]' \ '-n[Do not install or build dependencies]' \ @@ -1018,7 +1018,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (fees) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-n[Do not install or build dependencies]' \ '--no-deps[Do not install or build dependencies]' \ '--no-kill[The test will not kill all the nodes during execution]' \ @@ -1031,7 +1031,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (revert) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--enable-consensus[Enable consensus]' \ '-e[Run tests for external node]' \ '--external-node[Run tests for external node]' \ @@ -1047,7 +1047,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (recovery) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-s[Run recovery from a snapshot instead of genesis]' \ '--snapshot[Run recovery from a snapshot instead of genesis]' \ '-n[Do not install or build dependencies]' \ @@ -1062,7 +1062,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (upgrade) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-n[Do not install or build dependencies]' \ '--no-deps[Do not install or build dependencies]' \ '-v[Verbose mode]' \ @@ -1074,7 +1074,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (build) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1084,8 +1084,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (rust) _arguments 
"${_arguments_options[@]}" : \ -'--options=[Cargo test flags]:OPTIONS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--options=[Cargo test flags]:OPTIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1095,7 +1095,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (l1-contracts) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1105,7 +1105,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1115,7 +1115,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (wallet) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1125,7 +1125,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (loadtest) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1203,7 +1203,7 @@ esac ;; (clean) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1221,7 +1221,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (all) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to 
use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1231,7 +1231,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (containers) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1241,7 +1241,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contracts-cache) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1287,7 +1287,7 @@ esac ;; (snapshot) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1305,7 +1305,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (create) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1345,7 +1345,7 @@ esac _arguments "${_arguments_options[@]}" : \ '*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ '*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[]' \ '--check[]' \ '-v[Verbose mode]' \ @@ -1357,7 +1357,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (fmt) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[]' \ '--check[]' \ '-v[Verbose mode]' \ @@ -1377,7 +1377,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (rustfmt) _arguments 
"${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1387,7 +1387,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contract) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1399,7 +1399,7 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ '*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1445,7 +1445,7 @@ esac ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1463,7 +1463,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (info) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1473,9 +1473,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (insert-batch) _arguments "${_arguments_options[@]}" : \ -'--number=[]:NUMBER: ' \ -'--version=[]:VERSION: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--number=[]:NUMBER:_default' \ +'--version=[]:VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--default[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -1486,9 +1486,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (insert-version) 
_arguments "${_arguments_options[@]}" : \ -'--version=[]:VERSION: ' \ -'--snark-wrapper=[]:SNARK_WRAPPER: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--version=[]:VERSION:_default' \ +'--snark-wrapper=[]:SNARK_WRAPPER:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--default[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -1540,7 +1540,7 @@ _arguments "${_arguments_options[@]}" : \ '--l2-contracts=[Build L2 contracts]' \ '--system-contracts=[Build system contracts]' \ '--test-contracts=[Build test contracts]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1550,9 +1550,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (config-writer) _arguments "${_arguments_options[@]}" : \ -'-p+[Path to the config file to override]:PATH: ' \ -'--path=[Path to the config file to override]:PATH: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-p+[Path to the config file to override]:PATH:_default' \ +'--path=[Path to the config file to override]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1563,10 +1563,10 @@ _arguments "${_arguments_options[@]}" : \ (send-transactions) _arguments "${_arguments_options[@]}" : \ '--file=[]:FILE:_files' \ -'--private-key=[]:PRIVATE_KEY: ' \ -'--l1-rpc-url=[]:L1_RPC_URL: ' \ -'--confirmations=[]:CONFIRMATIONS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--private-key=[]:PRIVATE_KEY:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--confirmations=[]:CONFIRMATIONS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1576,9 +1576,9 @@ _arguments "${_arguments_options[@]}" : \ ;; (status) _arguments "${_arguments_options[@]}" : \ -'-u+[URL of the health check 
endpoint]:URL: ' \ -'--url=[URL of the health check endpoint]:URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'-u+[URL of the health check endpoint]:URL:_default' \ +'--url=[URL of the health check endpoint]:URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1596,7 +1596,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (ports) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1634,7 +1634,7 @@ esac ;; (generate-genesis) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1916,7 +1916,7 @@ esac ;; (prover) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1934,35 +1934,35 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (init) _arguments "${_arguments_options[@]}" : \ -'--proof-store-dir=[]:PROOF_STORE_DIR: ' \ -'--bucket-base-url=[]:BUCKET_BASE_URL: ' \ -'--credentials-file=[]:CREDENTIALS_FILE: ' \ -'--bucket-name=[]:BUCKET_NAME: ' \ -'--location=[]:LOCATION: ' \ -'--project-id=[]:PROJECT_ID: ' \ +'--proof-store-dir=[]:PROOF_STORE_DIR:_default' \ +'--bucket-base-url=[]:BUCKET_BASE_URL:_default' \ +'--credentials-file=[]:CREDENTIALS_FILE:_default' \ +'--bucket-name=[]:BUCKET_NAME:_default' \ +'--location=[]:LOCATION:_default' \ +'--project-id=[]:PROJECT_ID:_default' \ '--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \ -'--public-store-dir=[]:PUBLIC_STORE_DIR: ' \ 
-'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL: ' \ -'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE: ' \ -'--public-bucket-name=[]:PUBLIC_BUCKET_NAME: ' \ -'--public-location=[]:PUBLIC_LOCATION: ' \ -'--public-project-id=[]:PUBLIC_PROJECT_ID: ' \ -'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ +'--public-store-dir=[]:PUBLIC_STORE_DIR:_default' \ +'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL:_default' \ +'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE:_default' \ +'--public-bucket-name=[]:PUBLIC_BUCKET_NAME:_default' \ +'--public-location=[]:PUBLIC_LOCATION:_default' \ +'--public-project-id=[]:PUBLIC_PROJECT_ID:_default' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ '--bellman-cuda=[]' \ '--setup-compressor-key=[]' \ -'--path=[]:PATH: ' \ +'--path=[]:PATH:_default' \ '--region=[]:REGION:(us europe asia)' \ '--mode=[]:MODE:(download generate)' \ '--setup-keys=[]' \ '--setup-database=[]:SETUP_DATABASE:(true false)' \ -'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL: ' \ -'--prover-db-name=[Prover database name]:PROVER_DB_NAME: ' \ +'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL:_default' \ +'--prover-db-name=[Prover database name]:PROVER_DB_NAME:_default' \ '-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \ '--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \ '-d+[]:DONT_DROP:(true false)' \ '--dont-drop=[]:DONT_DROP:(true false)' \ '--cloud-type=[]:CLOUD_TYPE:(gcp local)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--dev[]' \ '(--bellman-cuda-dir)--clone[]' \ '-v[Verbose mode]' \ @@ -1976,7 +1976,7 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--region=[]:REGION:(us europe asia)' \ '--mode=[]:MODE:(download generate)' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose 
mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -1988,13 +1988,13 @@ _arguments "${_arguments_options[@]}" : \ _arguments "${_arguments_options[@]}" : \ '--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor)' \ '--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \ -'--threads=[]:THREADS: ' \ -'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION: ' \ -'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT: ' \ -'--max-allocation=[]:MAX_ALLOCATION: ' \ +'--threads=[]:THREADS:_default' \ +'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION:_default' \ +'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT:_default' \ +'--max-allocation=[]:MAX_ALLOCATION:_default' \ '--docker=[]:DOCKER:(true false)' \ -'--tag=[]:TAG: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--tag=[]:TAG:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2004,8 +2004,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (init-bellman-cuda) _arguments "${_arguments_options[@]}" : \ -'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '(--bellman-cuda-dir)--clone[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2016,8 +2016,8 @@ _arguments "${_arguments_options[@]}" : \ ;; (compressor-keys) _arguments "${_arguments_options[@]}" : \ -'--path=[]:PATH: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--path=[]:PATH:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2071,10 +2071,10 @@ esac ;; 
(server) _arguments "${_arguments_options[@]}" : \ -'*--components=[Components of server to run]:COMPONENTS: ' \ -'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--genesis[Run server in genesis mode]' \ '--build[Build server but don'\''t run it]' \ '--uring[Enables uring support for RocksDB]' \ @@ -2087,7 +2087,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (external-node) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2105,10 +2105,10 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (configs) _arguments "${_arguments_options[@]}" : \ -'--db-url=[]:DB_URL: ' \ -'--db-name=[]:DB_NAME: ' \ -'--l1-rpc-url=[]:L1_RPC_URL: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--db-url=[]:DB_URL:_default' \ +'--db-name=[]:DB_NAME:_default' \ +'--l1-rpc-url=[]:L1_RPC_URL:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-u[Use default database urls and names]' \ '--use-default[Use default database urls and names]' \ '-v[Verbose mode]' \ @@ -2120,7 +2120,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (init) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2130,11 +2130,11 @@ _arguments "${_arguments_options[@]}" : \ ;; (run) 
_arguments "${_arguments_options[@]}" : \ -'*--components=[Components of server to run]:COMPONENTS: ' \ +'*--components=[Components of server to run]:COMPONENTS:_default' \ '--enable-consensus=[Enable consensus]' \ -'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--reinit[]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2183,7 +2183,7 @@ esac _arguments "${_arguments_options[@]}" : \ '-o+[Enable Grafana]' \ '--observability=[Enable Grafana]' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2193,7 +2193,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (contract-verifier) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2211,7 +2211,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (run) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2221,12 +2221,12 @@ _arguments "${_arguments_options[@]}" : \ ;; (init) _arguments "${_arguments_options[@]}" : \ -'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION: ' \ -'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION: ' \ -'--solc-version=[Version of solc to 
install]:SOLC_VERSION: ' \ -'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION: ' \ -'--vyper-version=[Version of vyper to install]:VYPER_VERSION: ' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION:_default' \ +'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION:_default' \ +'--solc-version=[Version of solc to install]:SOLC_VERSION:_default' \ +'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION:_default' \ +'--vyper-version=[Version of vyper to install]:VYPER_VERSION:_default' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--only[Install only provided compilers]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2269,7 +2269,7 @@ esac ;; (portal) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2279,7 +2279,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (explorer) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2297,7 +2297,7 @@ _arguments "${_arguments_options[@]}" : \ case $line[1] in (init) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2307,7 +2307,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (run-backend) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2317,7 +2317,7 @@ _arguments "${_arguments_options[@]}" 
: \ ;; (run) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2363,7 +2363,7 @@ esac ;; (consensus) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2382,7 +2382,7 @@ _arguments "${_arguments_options[@]}" : \ (set-attester-committee) _arguments "${_arguments_options[@]}" : \ '--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. File format is definied in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ @@ -2393,7 +2393,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (get-attester-committee) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \ @@ -2435,7 +2435,7 @@ esac ;; (update) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-c[Update only the config files]' \ '--only-config[Update only the config files]' \ '-v[Verbose mode]' \ @@ -2447,7 +2447,7 @@ _arguments "${_arguments_options[@]}" : \ ;; (markdown) _arguments "${_arguments_options[@]}" : \ -'--chain=[Chain to use]:CHAIN: ' \ +'--chain=[Chain to use]:CHAIN:_default' \ '-v[Verbose mode]' \ '--verbose[Verbose mode]' \ '--ignore-prerequisites[Ignores prerequisites checks]' \